Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 1 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string | 3 | 239 |
| max_stars_repo_name | string | 5 | 130 |
| max_stars_repo_head_hexsha | string | 40 | 78 |
| max_stars_repo_licenses | list | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_path | string | 3 | 239 |
| max_issues_repo_name | string | 5 | 130 |
| max_issues_repo_head_hexsha | string | 40 | 78 |
| max_issues_repo_licenses | list | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_path | string | 3 | 239 |
| max_forks_repo_name | string | 5 | 130 |
| max_forks_repo_head_hexsha | string | 40 | 78 |
| max_forks_repo_licenses | list | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | 24 | 24 |
| content | string | 1 | 1.03M |
| avg_line_length | float64 | 1 | 958k |
| max_line_length | int64 | 1 | 1.03M |
| alphanum_fraction | float64 | 0 | 1 |

Each row below gives its metadata values in this column order on a single line, followed by the file content, then a closing line with avg_line_length, max_line_length, and alphanum_fraction.
| 794a337fa34ea05c4b7db962287254e39caa5cfe | 935 | py | Python | slybot/setup.py | ruairif/portia | 175b1f5bbec50fda6adda042481bd09c77d12bf0 | ["BSD-3-Clause"] | null | null | null | slybot/setup.py | ruairif/portia | 175b1f5bbec50fda6adda042481bd09c77d12bf0 | ["BSD-3-Clause"] | null | null | null | slybot/setup.py | ruairif/portia | 175b1f5bbec50fda6adda042481bd09c77d12bf0 | ["BSD-3-Clause"] | null | null | null |
from slybot import __version__
from setuptools import setup, find_packages
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema']
tests_requires = install_requires
setup(name='slybot',
version=__version__,
license='BSD',
description='Slybot crawler',
author='Scrapy project',
author_email='info@scrapy.org',
url='http://github.com/scrapy/slybot',
packages=find_packages(exclude=('tests', 'tests.*')),
platforms=['Any'],
scripts=['bin/slybot', 'bin/portiacrawl'],
install_requires=install_requires,
      tests_require=tests_requires,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
])
| 34.62963 | 76 | 0.635294 |
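An aside on one packaging detail in the file above: find_packages(exclude=('tests', 'tests.*')) scans the working directory for importable packages and drops the test tree. A minimal, hedged sketch (the printed list depends entirely on where it is run; the package names in the comment are illustrative, not taken from the repo):

# Run from a project root; prints the discovered package names.
from setuptools import find_packages
print(find_packages(exclude=('tests', 'tests.*')))  # e.g. ['slybot', 'slybot.spiders']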
| 794a347def02cfe42aebfd0ddec9c3c6b5623633 | 579 | py | Python | var/spack/repos/builtin/packages/py-editdistance/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-editdistance/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-editdistance/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEditdistance(PythonPackage):
"""Fast implementation of the edit distance (Levenshtein distance)."""
homepage = "https://github.com/aflc/editdistance"
pypi = "editdistance/editdistance-0.4.tar.gz"
version('0.4', sha256='c765db6f8817d38922e4a50be4b9ab338b2c539377b6fcf0bca11dea72eeb8c1')
depends_on('py-setuptools', type='build')
| 32.166667 | 93 | 0.756477 |
| 794a35105ef7c5e401f1e0a7fade262f4d84d207 | 3,195 | py | Python | sympy/matrices/expressions/tests/test_applyfunc.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 2 | 2021-05-04T16:34:36.000Z | 2021-05-04T16:34:39.000Z | sympy/matrices/expressions/tests/test_applyfunc.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 10 | 2021-07-21T20:56:57.000Z | 2021-07-31T16:35:28.000Z | sympy/matrices/expressions/tests/test_applyfunc.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 2 | 2020-09-22T13:23:08.000Z | 2020-09-25T05:12:28.000Z |
from sympy.core.symbol import symbols, Dummy
from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction
from sympy import Matrix, Lambda, MatrixSymbol, exp, MatMul, sin, simplify
from sympy.testing.pytest import raises
from sympy.matrices.common import ShapeError
X = MatrixSymbol("X", 3, 3)
Y = MatrixSymbol("Y", 3, 3)
k = symbols("k")
Xk = MatrixSymbol("X", k, k)
Xd = X.as_explicit()
x, y, z, t = symbols("x y z t")
def test_applyfunc_matrix():
x = Dummy('x')
double = Lambda(x, x**2)
expr = ElementwiseApplyFunction(double, Xd)
assert isinstance(expr, ElementwiseApplyFunction)
assert expr.doit() == Xd.applyfunc(lambda x: x**2)
assert expr.shape == (3, 3)
assert expr.func(*expr.args) == expr
assert simplify(expr) == expr
assert expr[0, 0] == double(Xd[0, 0])
expr = ElementwiseApplyFunction(double, X)
assert isinstance(expr, ElementwiseApplyFunction)
assert isinstance(expr.doit(), ElementwiseApplyFunction)
assert expr == X.applyfunc(double)
assert expr.func(*expr.args) == expr
expr = ElementwiseApplyFunction(exp, X*Y)
assert expr.expr == X*Y
assert expr.function.dummy_eq(Lambda(x, exp(x)))
assert expr.dummy_eq((X*Y).applyfunc(exp))
assert expr.func(*expr.args) == expr
assert isinstance(X*expr, MatMul)
assert (X*expr).shape == (3, 3)
Z = MatrixSymbol("Z", 2, 3)
assert (Z*expr).shape == (2, 3)
expr = ElementwiseApplyFunction(exp, Z.T)*ElementwiseApplyFunction(exp, Z)
assert expr.shape == (3, 3)
expr = ElementwiseApplyFunction(exp, Z)*ElementwiseApplyFunction(exp, Z.T)
assert expr.shape == (2, 2)
raises(ShapeError, lambda: ElementwiseApplyFunction(exp, Z)*ElementwiseApplyFunction(exp, Z))
M = Matrix([[x, y], [z, t]])
expr = ElementwiseApplyFunction(sin, M)
assert isinstance(expr, ElementwiseApplyFunction)
assert expr.function.dummy_eq(Lambda(x, sin(x)))
assert expr.expr == M
assert expr.doit() == M.applyfunc(sin)
assert expr.doit() == Matrix([[sin(x), sin(y)], [sin(z), sin(t)]])
assert expr.func(*expr.args) == expr
expr = ElementwiseApplyFunction(double, Xk)
assert expr.doit() == expr
assert expr.subs(k, 2).shape == (2, 2)
assert (expr*expr).shape == (k, k)
M = MatrixSymbol("M", k, t)
expr2 = M.T*expr*M
assert isinstance(expr2, MatMul)
assert expr2.args[1] == expr
assert expr2.shape == (t, t)
expr3 = expr*M
assert expr3.shape == (k, t)
raises(ShapeError, lambda: M*expr)
expr1 = ElementwiseApplyFunction(lambda x: x+1, Xk)
expr2 = ElementwiseApplyFunction(lambda x: x, Xk)
assert expr1 != expr2
def test_applyfunc_entry():
af = X.applyfunc(sin)
assert af[0, 0] == sin(X[0, 0])
af = Xd.applyfunc(sin)
assert af[0, 0] == sin(X[0, 0])
def test_applyfunc_as_explicit():
af = X.applyfunc(sin)
assert af.as_explicit() == Matrix([
[sin(X[0, 0]), sin(X[0, 1]), sin(X[0, 2])],
[sin(X[1, 0]), sin(X[1, 1]), sin(X[1, 2])],
[sin(X[2, 0]), sin(X[2, 1]), sin(X[2, 2])],
])
def test_applyfunc_transpose():
af = Xk.applyfunc(sin)
assert af.T.dummy_eq(Xk.T.applyfunc(sin))
| 30.141509 | 97 | 0.644444 |
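A hedged usage sketch of the interface these tests exercise (standard sympy API; the commented outputs are what I expect from the assertions above, not captured output):

from sympy import MatrixSymbol, sin
X = MatrixSymbol("X", 3, 3)
expr = X.applyfunc(sin)            # stays lazy: an ElementwiseApplyFunction
print(expr.shape)                  # (3, 3)
print(expr.as_explicit()[0, 0])    # sin(X[0, 0])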
| 794a35bc84cf1f1dea8b8368eede33986c3273d9 | 7,368 | py | Python | nipype/pipeline/plugins/slurmgraph.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | 2 | 2019-01-25T18:20:51.000Z | 2019-07-30T20:51:51.000Z | nipype/pipeline/plugins/slurmgraph.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | null | null | null | nipype/pipeline/plugins/slurmgraph.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | 2 | 2018-01-25T19:48:17.000Z | 2019-01-25T18:20:52.000Z |
# -*- coding: utf-8 -*-
"""Parallel workflow execution via SLURM
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
import os
import sys
from ...interfaces.base import CommandLine
from .base import (GraphPluginBase, logger)
def node_completed_status(checknode):
"""
    A function to determine if a node has previously completed its work
:param checknode: The node to check the run status
:return: boolean value True indicates that the node does not need to be run.
"""
""" TODO: place this in the base.py file and refactor """
node_state_does_not_require_overwrite = (checknode.overwrite is False or
(checknode.overwrite is None and not
checknode._interface.always_run)
)
hash_exists = False
try:
hash_exists, _, _, _ = checknode.hash_exists()
except Exception:
hash_exists = False
return (hash_exists and node_state_does_not_require_overwrite)
class SLURMGraphPlugin(GraphPluginBase):
"""Execute using SLURM
The plugin_args input to run can be used to control the SGE execution.
Currently supported options are:
- template : template to use for batch job submission
- qsub_args : arguments to be prepended to the job execution script in the
qsub call
"""
_template = "#!/bin/bash"
def __init__(self, **kwargs):
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'template' in kwargs['plugin_args']:
self._template = kwargs['plugin_args']['template']
if os.path.isfile(self._template):
self._template = open(self._template).read()
if 'sbatch_args' in kwargs['plugin_args']:
self._sbatch_args = kwargs['plugin_args']['sbatch_args']
if 'dont_resubmit_completed_jobs' in kwargs['plugin_args']:
self._dont_resubmit_completed_jobs = kwargs['plugin_args']['dont_resubmit_completed_jobs']
else:
self._dont_resubmit_completed_jobs = False
super(SLURMGraphPlugin, self).__init__(**kwargs)
def _submit_graph(self, pyfiles, dependencies, nodes):
def make_job_name(jobnumber, nodeslist):
"""
- jobnumber: The index number of the job to create
- nodeslist: The name of the node being processed
- return: A string representing this job to be displayed by SLURM
"""
job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id)
# Condition job_name to be a valid bash identifier (i.e. - is invalid)
job_name = job_name.replace('-', '_').replace('.', '_').replace(':', '_')
return job_name
batch_dir, _ = os.path.split(pyfiles[0])
submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh')
cache_doneness_per_node = dict()
if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
node_status_done = node_completed_status(node)
                # if the node itself claims to be done, then check to ensure all
                # dependencies are also done
if node_status_done and idx in dependencies:
for child_idx in dependencies[idx]:
if child_idx in cache_doneness_per_node:
child_status_done = cache_doneness_per_node[child_idx]
else:
child_status_done = node_completed_status(nodes[child_idx])
node_status_done = node_status_done and child_status_done
cache_doneness_per_node[idx] = node_status_done
with open(submitjobsfile, 'wt') as fp:
fp.writelines('#!/usr/bin/env bash\n')
fp.writelines('# Condense format attempted\n')
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
if cache_doneness_per_node.get(idx, False):
continue
else:
template, sbatch_args = self._get_args(
node, ["template", "sbatch_args"])
batch_dir, name = os.path.split(pyscript)
name = '.'.join(name.split('.')[:-1])
batchscript = '\n'.join((template,
'%s %s' % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir,
'batchscript_%s.sh' % name)
batchscriptoutfile = batchscriptfile + '.o'
batchscripterrfile = batchscriptfile + '.e'
with open(batchscriptfile, 'wt') as batchfp:
batchfp.writelines(batchscript)
batchfp.close()
deps = ''
if idx in dependencies:
values = ''
for jobid in dependencies[idx]:
                            # Avoid dependencies on jobs that are already done
                            if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]:
values += "${{{0}}}:".format(make_job_name(jobid, nodes))
if values != '': # i.e. if some jobs were added to dependency list
values = values.rstrip(':')
deps = '--dependency=afterok:%s' % values
jobname = make_job_name(idx, nodes)
# Do not use default output locations if they are set in self._sbatch_args
stderrFile = ''
if self._sbatch_args.count('-e ') == 0:
stderrFile = '-e {errFile}'.format(
errFile=batchscripterrfile)
stdoutFile = ''
if self._sbatch_args.count('-o ') == 0:
stdoutFile = '-o {outFile}'.format(
outFile=batchscriptoutfile)
full_line = '{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk \'/^Submitted/ {{print $4}}\')\n'.format(
jobNm=jobname,
outFileOption=stdoutFile,
errFileOption=stderrFile,
extraSBatchArgs=sbatch_args,
dependantIndex=deps,
batchscript=batchscriptfile)
fp.writelines(full_line)
cmd = CommandLine('bash', environ=dict(os.environ),
terminal_output='allatonce')
cmd.inputs.args = '%s' % submitjobsfile
cmd.run()
logger.info('submitted all jobs to queue')
| 48.156863 | 189 | 0.55361 |
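For context, this plugin is normally selected through a workflow's run() call rather than instantiated directly. A hedged sketch (workflow construction elided; the plugin_args keys are the ones read in __init__ above, and the sbatch time limit is a placeholder):

import nipype.pipeline.engine as pe

wf = pe.Workflow(name='demo')  # nodes and connections would be added here
wf.run(plugin='SLURMGraph',
       plugin_args={'sbatch_args': '--time=01:00:00',
                    'dont_resubmit_completed_jobs': True})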
| 794a35f5f75387b51a3a29b51fb88c676ccb7170 | 6,859 | py | Python | src/ansible_navigator/ui_framework/curses_window.py | ekmixon/ansible-navigator | 9903d82ac76a4aee61a64c2e5f19f5ccca3cf136 | ["Apache-2.0", "MIT"] | 134 | 2021-03-26T17:44:49.000Z | 2022-03-31T13:15:52.000Z | src/ansible_navigator/ui_framework/curses_window.py | cidrblock/ansible-navigator | 674e5edce4d4181e6f79b6f24b590a347156665d | ["Apache-2.0", "MIT"] | 631 | 2021-03-26T19:38:32.000Z | 2022-03-31T22:57:36.000Z | src/ansible_navigator/ui_framework/curses_window.py | cidrblock/ansible-navigator | 674e5edce4d4181e6f79b6f24b590a347156665d | ["Apache-2.0", "MIT"] | 48 | 2021-03-26T17:44:29.000Z | 2022-03-08T21:12:26.000Z |
"""type for curses window
"""
import curses
import json
import logging
from typing import TYPE_CHECKING
from typing import Union
from .colorize import hex_to_rgb_curses
from .curses_defs import CursesLine
from .ui_config import UIConfig
if TYPE_CHECKING:
# pylint: disable= no-name-in-module
from _curses import _CursesWindow
Window = _CursesWindow
else:
from typing import Any
Window = Any
COLOR_MAP = {
"terminal.ansiBlack": 0,
"terminal.ansiRed": 1,
"terminal.ansiGreen": 2,
"terminal.ansiYellow": 3,
"terminal.ansiBlue": 4,
"terminal.ansiMagenta": 5,
"terminal.ansiCyan": 6,
"terminal.ansiWhite": 7,
"terminal.ansiBrightBlack": 8,
"terminal.ansiBrightRed": 9,
"terminal.ansiBrightGreen": 10,
"terminal.ansiBrightYellow": 11,
"terminal.ansiBrightBlue": 12,
"terminal.ansiBrightMagenta": 13,
"terminal.ansiBrightCyan": 14,
"terminal.ansiBrightWhite": 15,
}
class CursesWindow:
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""abstraction for a curses window"""
def __init__(self, ui_config: UIConfig):
self._logger = logging.getLogger(__name__)
self._screen: Window
self.win: Window
self._screen_miny = 3
self._prefix_color = 8
self._theme_dir: str
        self._term_osc4_support: bool
self._ui_config = ui_config
self._logger.debug("self._ui_config: %s", self._ui_config)
self._set_colors()
@property
def _screen_w(self) -> int:
"""return the screen width
:return: the current screen width
:rtype: int
"""
return self._screen.getmaxyx()[1]
@property
def _screen_h(self) -> int:
"""return the screen height, or notify if too small
:return: the current screen height
:rtype: int
"""
while True:
if self._screen.getmaxyx()[0] >= self._screen_miny:
return self._screen.getmaxyx()[0]
curses.flash()
curses.beep()
self._screen.refresh()
def _color_pair_or_none(self, color: int) -> Union[None, int]:
"""
Returns 0 if colors are disabled.
Otherwise returns the curses color pair by
taking mod (available colors)
and passing that.
"""
if not self._ui_config.color or curses.COLORS == 0:
return None
color_arg = color % curses.COLORS # self._number_colors
return curses.color_pair(color_arg)
def _curs_set(self, value: int):
"""in the case of a TERM with limited capabilities
log an error"""
try:
curses.curs_set(value)
except curses.error:
self._logger.error("Errors setting up terminal, check TERM value")
def _add_line(
self, window: Window, lineno: int, line: CursesLine, prefix: Union[str, None] = None
) -> None:
"""add a line to a window
:param window: A curses window
:type window: Window
:param lineno: the line number
:type lineno: int
:param line: The line to add
:type line: CursesLine
:param prefix: The prefix for the line
:type prefix: str or None
"""
win = window
if prefix:
color = self._color_pair_or_none(self._prefix_color)
if color is None:
win.addstr(lineno, 0, prefix)
else:
win.addstr(lineno, 0, prefix, color)
if line:
win.move(lineno, 0)
for line_part in line:
column = line_part.column + len(prefix or "")
if column <= self._screen_w:
text = line_part.string[0 : self._screen_w - column + 1]
try:
color = self._color_pair_or_none(line_part.color)
if color is None:
win.addstr(lineno, column, text)
else:
win.addstr(lineno, column, text, color | line_part.decoration)
except curses.error:
# curses error at last column & row but I don't care
# because it still draws it
# https://stackoverflow.com/questions/10877469/
# ncurses-setting-last-character-on-screen-without-scrolling-enabled
if lineno == win.getyx()[0] and column + len(text) == win.getyx()[1] + 1:
pass
else:
self._logger.debug("curses error")
self._logger.debug("screen_h: %s, lineno: %s", self._screen_h, lineno)
self._logger.debug(
"screen_w: %s, column: %s text: %s, lentext: %s, end_col: %s",
self._screen_w,
column,
text,
len(text),
column + len(text),
)
def _set_colors(self) -> None:
"""Set the colors for curses"""
# curses colors may have already been initialized
# with another instance of curses window
if self._ui_config.colors_initialized is True:
return
self._curs_set(0)
# in the case of a TERM with limited capabilities
# disable color and get out fast
try:
curses.use_default_colors()
except curses.error:
self._logger.error("Errors setting up terminal, no color support")
            self._term_osc4_support = False
self._ui_config.colors_initialized = True
return
self._logger.debug("curses.COLORS: %s", curses.COLORS)
self._logger.debug("curses.can_change_color: %s", curses.can_change_color())
        self._term_osc4_support = curses.can_change_color()
if self._ui_config.osc4 is False:
            self._term_osc4_support = False
        self._logger.debug("term_osc4_support: %s", self._term_osc4_support)
        if self._term_osc4_support:
with open(self._ui_config.terminal_colors_path, encoding="utf-8") as data_file:
colors = json.load(data_file)
for color_name, color_hex in colors.items():
idx = COLOR_MAP[color_name]
color = hex_to_rgb_curses(color_hex)
curses.init_color(idx, *color)
self._logger.debug("Custom colors set")
else:
self._logger.debug("Using terminal defaults")
for i in range(0, curses.COLORS):
curses.init_pair(i, i, -1)
self._ui_config.colors_initialized = True
| 33.788177 | 98 | 0.56568 |
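The key trick in _color_pair_or_none is folding an arbitrary theme color index into whatever palette the terminal actually has. The same arithmetic in isolation (plain integers, no curses required):

# 256-color theme indexes wrapped into 8-color and 256-color terminals.
for color, colors_available in [(12, 8), (144, 8), (144, 256)]:
    print(color, '->', color % colors_available)  # 12 -> 4, 144 -> 0, 144 -> 144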
| 794a3666f7e17d950abdcbaf432b2e76abd3c097 | 1,137 | py | Python | esusu/api/migrations/0005_credit.py | olujedai/esusu | 2a4f79f5aac933fe32f45d778fb4e75e49b8fbda | ["Apache-2.0"] | null | null | null | esusu/api/migrations/0005_credit.py | olujedai/esusu | 2a4f79f5aac933fe32f45d778fb4e75e49b8fbda | ["Apache-2.0"] | null | null | null | esusu/api/migrations/0005_credit.py | olujedai/esusu | 2a4f79f5aac933fe32f45d778fb4e75e49b8fbda | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-29 12:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20190729_1209'),
]
operations = [
migrations.CreateModel(
name='Credit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(help_text='Designates the amount of money credited to an account.', verbose_name='Amount')),
('date_credited', models.DateTimeField(default=django.utils.timezone.now, help_text='The date a credit was made.', verbose_name='Date')),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='credits', to='api.SocietyAccount')),
('contributor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contributions', to=settings.AUTH_USER_MODEL)),
],
),
]
| 42.111111 | 155 | 0.668426 |
| 794a37e0eda0eca084dd91ce7c2752b71ea780d5 | 1,515 | py | Python | eth/vm/logic/storage.py | SAYONG/py-evm | f205ed099c5534892c3afbbd1b14a2fa7f597673 | ["MIT"] | null | null | null | eth/vm/logic/storage.py | SAYONG/py-evm | f205ed099c5534892c3afbbd1b14a2fa7f597673 | ["MIT"] | null | null | null | eth/vm/logic/storage.py | SAYONG/py-evm | f205ed099c5534892c3afbbd1b14a2fa7f597673 | ["MIT"] | null | null | null |
from eth_utils import (
encode_hex,
)
from eth import constants
from eth.vm.computation import BaseComputation
def sstore(computation: BaseComputation) -> None:
slot, value = computation.stack_pop_ints(2)
current_value = computation.state.get_storage(
address=computation.msg.storage_address,
slot=slot,
)
is_currently_empty = not bool(current_value)
is_going_to_be_empty = not bool(value)
    # Refund rule: clearing a previously nonzero slot earns the storage-clear
    # refund; every other transition refunds nothing.
    if is_currently_empty:
        gas_refund = 0
    elif is_going_to_be_empty:
        gas_refund = constants.REFUND_SCLEAR
    else:
        gas_refund = 0
    # Cost rule: only a zero -> nonzero write pays the expensive GAS_SSET;
    # all other writes pay GAS_SRESET.
    if is_currently_empty and is_going_to_be_empty:
        gas_cost = constants.GAS_SRESET
    elif is_currently_empty:
        gas_cost = constants.GAS_SSET
    elif is_going_to_be_empty:
        gas_cost = constants.GAS_SRESET
    else:
        gas_cost = constants.GAS_SRESET
computation.consume_gas(gas_cost, reason="SSTORE: {0}[{1}] -> {2} ({3})".format(
encode_hex(computation.msg.storage_address),
slot,
value,
current_value,
))
if gas_refund:
computation.refund_gas(gas_refund)
computation.state.set_storage(
address=computation.msg.storage_address,
slot=slot,
value=value,
)
def sload(computation: BaseComputation) -> None:
slot = computation.stack_pop1_int()
value = computation.state.get_storage(
address=computation.msg.storage_address,
slot=slot,
)
computation.stack_push_int(value)
| 24.836066 | 84 | 0.680528 |
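The branch structure above encodes the Frontier-era SSTORE pricing. A standalone sketch of the same rule, assuming the constant values I believe eth.constants defines (GAS_SSET = 20000, GAS_SRESET = 5000, REFUND_SCLEAR = 15000), with small self-checks:

# Assumed constant values; not imported from py-evm.
GAS_SSET, GAS_SRESET, REFUND_SCLEAR = 20000, 5000, 15000

def sstore_gas(current_value, new_value):
    """Return (gas_cost, gas_refund) for overwriting current_value."""
    if current_value == 0 and new_value != 0:
        return GAS_SSET, 0                  # zero -> nonzero: expensive set
    if current_value != 0 and new_value == 0:
        return GAS_SRESET, REFUND_SCLEAR    # nonzero -> zero: reset plus refund
    return GAS_SRESET, 0                    # no transition across zero

assert sstore_gas(0, 7) == (20000, 0)
assert sstore_gas(7, 0) == (5000, 15000)
assert sstore_gas(7, 9) == (5000, 0)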
| 794a3821ed07f9a0ae80af1ccb9585c1c71b9ce9 | 7,288 | py | Python | populate_database.py | Jabors/financial-data-damodaran | 0b3e94429faed352a6ee3c7524e7fbde79668703 | ["Unlicense", "MIT"] | 8 | 2018-01-06T08:33:08.000Z | 2021-11-08T12:19:18.000Z | populate_database.py | Jabors/financial-data-damodaran | 0b3e94429faed352a6ee3c7524e7fbde79668703 | ["Unlicense", "MIT"] | null | null | null | populate_database.py | Jabors/financial-data-damodaran | 0b3e94429faed352a6ee3c7524e7fbde79668703 | ["Unlicense", "MIT"] | 3 | 2020-05-28T19:02:31.000Z | 2022-02-22T20:08:33.000Z |
import sys
import csv
import xlrd
from pymongo import MongoClient
from os import listdir
from os.path import isfile, join
import config
def populate_currencies(db):
sheet = xlrd.open_workbook(config.currency_file).sheet_by_index(0)
currency={}
currency['countries']=[sheet.cell(4,0).value]
currency['name']=sheet.cell(4,1).value
currency['_id']=sheet.cell(4,2).value
currency_code=currency['_id']
for i in range(5,300):
try:
if currency_code!=sheet.cell(i,2).value:
db.currencies.replace_one({'_id':currency['_id']},currency,upsert=True)
currency['countries']=[sheet.cell(i,0).value]
currency['name']=sheet.cell(i,1).value
currency['_id']=sheet.cell(i,2).value
currency_code=currency['_id']
else:
currency['countries'].append(sheet.cell(i,0).value)
except IndexError:
db.currencies.replace_one({'_id':currency['_id']},currency,upsert=True)
break
def populate_tax_rates(db):
tax_files=[f for f in listdir(config.effective_tax_path) if isfile(join(config.effective_tax_path, f))]
for file in tax_files:
region=file.split('_')[1].replace('.xls','')
document={}
document['_id']=region
document['rates_by_sector']={}
sheet = xlrd.open_workbook(config.effective_tax_path + file).sheet_by_index(0)
for i in range(8, 200):
try:
sector=sheet.cell(i,0).value.replace('.','')
document['rates_by_sector'][sector]={}
document['rates_by_sector'][sector]['money_making']=sheet.cell(i,2).value
document['rates_by_sector'][sector]['money_losing']=sheet.cell(i,3).value
document['rates_by_sector'][sector]['all']=sheet.cell(i,4).value
except IndexError:
break
db.effective_tax.replace_one({'_id':region},document,upsert=True)
def populate_diversified_betas(db):
beta_files=[f for f in listdir(config.diversified_betas_path) if isfile(join(config.diversified_betas_path, f))]
for file in beta_files:
region=file.split('_')[1].replace('.xls','')
sheet = xlrd.open_workbook(config.diversified_betas_path + file).sheet_by_index(0)
for i in range(8, 200):
try:
sector=sheet.cell(i,0).value.replace('.','')
document={}
document['_id']=region+sector
document['region']=region
document['sector']=sector
document['market_beta']=sheet.cell(i,2).value
document['debt_equity']=sheet.cell(i,3).value
document['tax_rate']=sheet.cell(i,4).value
document['unlevered_beta']=sheet.cell(i,5).value
document['cash_firm']=sheet.cell(i,6).value
document['unlevered_beta_cash_corrected']=sheet.cell(i,7).value
document['sigma_price']=sheet.cell(i,8).value
document['sigma_ebit']=sheet.cell(i,9).value
db.diversified_betas.replace_one({'_id':document['_id']},document,upsert=True)
except IndexError:
break
def populate_undiversified_betas(db):
beta_files=[f for f in listdir(config.undiversified_betas_path) if isfile(join(config.undiversified_betas_path, f))]
for file in beta_files:
region=file.split('_')[1].replace('.xls','')
sheet = xlrd.open_workbook(config.undiversified_betas_path + file).sheet_by_index(0)
for i in range(8, 200):
try:
sector=sheet.cell(i,0).value.replace('.','')
document={}
document['_id']=region+sector
document['region']=region
document['sector']=sector
document['unlevered_beta_partial']=sheet.cell(i,2).value
document['levered_beta_partial']=sheet.cell(i,3).value
document['market_correlation']=sheet.cell(i,4).value
document['unlevered_beta']=sheet.cell(i,5).value
document['levered_beta']=sheet.cell(i,6).value
db.undiversified_betas.replace_one({'_id':document['_id']},document,upsert=True)
except IndexError:
break
def populate_erps(db):
sheet = xlrd.open_workbook(config.erp_file).sheet_by_index(5)
us_spread=0.0038
for i in range(1, 156):
#try:
document = {}
country=sheet.cell(i,0).value
document['rating']=sheet.cell(i,2).value
document['default_spread']=sheet.cell(i,3).value
document['default_spread']=float(document['default_spread'])+us_spread
document['country_risk_premium']=sheet.cell(i,5).value
document['equity_risk_premium']=sheet.cell(i,4).value
document['marginal_tax']=sheet.cell(i,6).value
#Get currency
cursor=db.currencies.find({'countries': country})
for entry in cursor:
document['currency']=entry['name']
document['currency_id']=entry['_id']
db.equity_risk_premium.replace_one({'_id':country},document,upsert=True)
sheet = xlrd.open_workbook(config.erp_file).sheet_by_index(2)
for i in range(7, 153):
country=sheet.cell(i,0).value
document=db.equity_risk_premium.find({'_id': country})[0]
document['region']=sheet.cell(i,1).value
default_spread=sheet.cell(i,6).value
if str(default_spread)!='NA':
document['default_spread']=default_spread+us_spread
document['country_risk_premium']=sheet.cell(i,8).value
document['equity_risk_premium']=sheet.cell(i,7).value
db.equity_risk_premium.replace_one({'_id':document['_id']},document,upsert=True)
def populate_ratings_spreads(db):
#Ratings and spreads
sheet = xlrd.open_workbook(config.ratings_file).sheet_by_index(0)
document={}
document['_id']='large'
document['spreads']=[]
for i in range(18, 33):
spread={}
ratings=sheet.cell(i,2).value
ratings=ratings.split('/')
spread['rating_moodys']=ratings[0]
spread['rating_sp']=ratings[1]
spread['coverage_ratio_lower']=sheet.cell(i,0).value
spread['coverage_ratio_higher']=sheet.cell(i,1).value
spread['spread']=sheet.cell(i,3).value
document['spreads'].append(spread)
db.ratings_spreads.replace_one({'_id': document['_id']}, document, upsert=True)
document['_id']='financial'
document['spreads']=[]
for i in range(18, 33):
spread={}
ratings=sheet.cell(i,7).value
ratings=ratings.split('/')
spread['rating_moodys']=ratings[0]
spread['rating_sp']=ratings[1]
spread['coverage_ratio_lower']=sheet.cell(i,7).value
spread['coverage_ratio_higher']=sheet.cell(i,6).value
spread['spread']=sheet.cell(i,8).value
document['spreads'].append(spread)
db.ratings_spreads.replace_one({'_id': document['_id']}, document, upsert=True)
document['_id']='small'
document['spreads']=[]
for i in range(37, 52):
spread={}
ratings=sheet.cell(i,2).value
ratings=ratings.split('/')
spread['rating_moodys']=ratings[0]
spread['rating_sp']=ratings[1]
spread['coverage_ratio_lower']=sheet.cell(i,0).value
spread['coverage_ratio_higher']=sheet.cell(i,1).value
spread['spread']=sheet.cell(i,3).value
document['spreads'].append(spread)
db.ratings_spreads.replace_one({'_id': document['_id']}, document, upsert=True)
def main():
client=MongoClient(config.mongo_client, config.mongo_port)
db=client[config.mongo_dbname]
#Start with macroeconomic data
#Currencies
populate_currencies(db)
#Effective Tax Rates
populate_tax_rates(db)
#Diversified Betas
populate_diversified_betas(db)
#Undiversified Betas
populate_undiversified_betas(db)
#Equity Risk Premiums
populate_erps(db)
#Populating ratings and spreads
populate_ratings_spreads(db)
if __name__ == '__main__':
main()
| 32.977376 | 118 | 0.700741 |
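Every loader above repeats one pattern: read xlrd cells row by row and treat IndexError as the end of the sheet. The pattern in isolation, as a hedged sketch (the path is a placeholder and an .xls workbook is assumed):

import xlrd

def rows_from(path, start_row=8, sheet_index=0):
    """Yield whole rows until the sheet runs out, mirroring the loops above."""
    sheet = xlrd.open_workbook(path).sheet_by_index(sheet_index)
    i = start_row
    while True:
        try:
            yield [sheet.cell(i, j).value for j in range(sheet.ncols)]
        except IndexError:
            return
        i += 1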
| 794a38c115570bdb13552999d1b3e404fb251adf | 12,541 | py | Python | test/functional/feature_fee_estimation.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | ["MIT"] | 18 | 2018-11-30T19:07:06.000Z | 2021-05-17T11:06:12.000Z | test/functional/feature_fee_estimation.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | ["MIT"] | 1 | 2018-12-08T19:41:43.000Z | 2018-12-08T19:41:43.000Z | test/functional/feature_fee_estimation.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | ["MIT"] | 17 | 2018-11-30T17:16:21.000Z | 2021-10-30T17:33:14.000Z |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from test_framework.test_framework import RitoTestFramework
from test_framework.util import *
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
global log
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""
Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
"""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount*COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(ToHex(tx), True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
"""
We need to generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to be small inputs so they wouldn't have
a high coin age when the notion of priority still existed.
"""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split) :
completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
else :
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
"""
This function calls estimatefee and verifies that the estimates
meet certain invariants.
"""
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in [x for x in all_estimates if x >= 0]:
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)
else:
invalid_estimates += 1
# estimatesmartfee should still be valid
approx_estimate = node.estimatesmartfee(i+1)["feerate"]
answer_found = node.estimatesmartfee(i+1)["blocks"]
assert(approx_estimate > 0)
assert(answer_found > i+1)
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
class EstimateFeeTest(RitoTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
"""
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"],
["-blockmaxsize=8000", "-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
# Node1 mines small blocks but that are bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Make log handler available to helper functions
global log
log = self.log
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 14)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
| 47.324528 | 113 | 0.646838 |
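One detail worth unpacking from small_txpuzzle_randfee: 1.1892 is approximately 2**0.25, so 28 random exponent steps span about 2**7 = 128, which is where the "1-128 * fee_increment" comment comes from. A one-line check:

print(1.1892 ** 28)  # ~127.9, i.e. roughly 128 fee increments at the top end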
| 794a390fd81e73160cac0b65a01acb5a4717a1f0 | 1,534 | py | Python | Fig2_Scheme_tapping/Fig_tapping.py | ealopez/flat_punch | 3b41865ebd60d22cb0e32a8ef200ec790b578d08 | ["MIT-0"] | null | null | null | Fig2_Scheme_tapping/Fig_tapping.py | ealopez/flat_punch | 3b41865ebd60d22cb0e32a8ef200ec790b578d08 | ["MIT-0"] | null | null | null | Fig2_Scheme_tapping/Fig_tapping.py | ealopez/flat_punch | 3b41865ebd60d22cb0e32a8ef200ec790b578d08 | ["MIT-0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 12 09:37:42 2016
@author: Enrique Alejandro
"""
import numpy as np
import matplotlib.pyplot as plt
amph = np.loadtxt('summary.txt', skiprows=1)
amp = amph[3]*10.0*1.0e-9 #multiplying by free amplitude and then converting to m
phi = amph[4]*np.pi/180.0 #converting phase to radians
omega = 2*np.pi*6.0e5
compu = np.loadtxt('compu_4.00.txt', skiprows=1)
t = compu[:,0]*1.0e-6 #converting to seconds
tip = compu[:,1]*1.0e-9 #converting to meters
cos_ref = compu[:,2]*1.0e-9 #converting to meters
Fts = compu[:,3]*1.0e-9 #converting to Newtons
xb = compu[:,4]*1.0e-9 #converting to meters
Zeq = np.zeros(np.size(tip))
Zeq[:] = 4.0e-9
fig, ax = plt.subplots(1,1,figsize=(12,5))
ax.plot(t*1.0e6, tip*1.0e9, 'b', ls='dashdot', lw=2, label=r'$z_{t-s}(t)=Z_{eq}+z(t)$')
ax.plot(t*1.0e6, xb*1.0e9, 'g', lw=3, label=r'$Sample Position$')
ax.legend(loc=4, fontsize=18, frameon=False)
ax.plot(t*1.0e6, cos_ref*1.0e9, 'k', lw=1, ls='dotted', label=R'$z(t)=A*sin(\omega*t)$')
ax.plot(t*1.0e6, Zeq*1.0e9, color='dimgray', ls='dashed', lw=1, label=r'$Z_{eq}$')
ax.legend(loc=4, fontsize=16, frameon=False)
#plt.plot(t*1.0e6, amp*np.cos(omega*t-phi)*1.0e9, 'c', lw=2, label='Sine Reference')
plt.xlabel('time, a.u.', fontsize='20',fontweight='bold')
plt.ylabel('Z-position, nm',fontsize='20',fontweight='bold')
ax.set_xlim(840.2,839.95+(2*np.pi/omega)*1.0e6*1.4)
ax.set_ylim(-12,13.5)
ax.set_xticks([])
#ax.set_yticks([])
#ax.axis('off')
plt.savefig('Tapping_Scheme.png', bbox_inches='tight')
| 35.674419 | 88 | 0.664276 |
| 794a39ecdcafd8c63cbeb24459efcd02761b3afb | 1,146 | py | Python | src/01.py | sorabatake/article_15701_convert_optical_and_sar | 62c21b43e6e364f0131bea6f14e6e4bc0697deb2 | ["CC0-1.0"] | null | null | null | src/01.py | sorabatake/article_15701_convert_optical_and_sar | 62c21b43e6e364f0131bea6f14e6e4bc0697deb2 | ["CC0-1.0"] | null | null | null | src/01.py | sorabatake/article_15701_convert_optical_and_sar | 62c21b43e6e364f0131bea6f14e6e4bc0697deb2 | ["CC0-1.0"] | null | null | null |
import os, requests, subprocess
from osgeo import gdal
from osgeo import gdal_array
# Entry point
def main():
cmd = "find ./data/ALOS* | grep tif"
process = (subprocess.Popen(cmd, stdout=subprocess.PIPE,shell=True).communicate()[0]).decode('utf-8')
file_name_list = process.rsplit()
for _file_name in file_name_list:
convert_file_name = _file_name + "_converted.tif"
crop_file_name = _file_name + "_cropped.tif"
x1 = 139.807069
y1 = 35.707233
x2 = 139.814111
y2 = 35.714069
cmd = 'gdalwarp -t_srs EPSG:4326 ' +_file_name + ' ' + convert_file_name
process = (subprocess.Popen(cmd, stdout=subprocess.PIPE,shell=True).communicate()[0]).decode('utf-8')
print("[Done] ", convert_file_name)
cmd = 'gdal_translate -projwin ' + str(x1) + ' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2) + ' ' + convert_file_name + " " + crop_file_name
print(cmd)
process = (subprocess.Popen(cmd, stdout=subprocess.PIPE,shell=True).communicate()[0]).decode('utf-8')
print("[Done] ", crop_file_name)
if __name__=="__main__":
main()
| 42.444444 | 148 | 0.623909 |
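A hedged note on the shelled-out commands above: the osgeo.gdal bindings that the script already imports can do both steps in-process, and gdal_translate's -projwin expects the window as ulx uly lrx lry, so the larger latitude belongs in the second slot. A sketch with placeholder file names:

from osgeo import gdal

gdal.Warp('converted.tif', 'input.tif', dstSRS='EPSG:4326')
gdal.Translate('cropped.tif', 'converted.tif',
               projWin=[139.807069, 35.714069, 139.814111, 35.707233])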
| 794a3a46a91e33875e346027db2c7b987abc7d59 | 2,818 | py | Python | appengine/predator/analysis/test/occurrence_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/predator/analysis/test/occurrence_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/predator/analysis/test/occurrence_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from analysis.analysis_testcase import AnalysisTestCase
from analysis.occurrence import Occurrence
from analysis.occurrence import DefaultOccurrenceRanking
from analysis.occurrence import RankByOccurrence
from analysis.stacktrace import StackFrame
from analysis.stacktrace import CallStack
from analysis.suspect import Suspect
from gae_libs.pipeline_wrapper import pipeline_handlers
class DummyClassifier(object):
def GetClassFromStackFrame(self, frame):
if frame.dep_path == 'src/':
return 'class_1'
if frame.dep_path == 'dummy/':
return None
return 'class_2'
def GetClassFromSuspect(self, _result): # pragma: no cover.
return 'class_3'
def Classify(self, results, crash_stack):
top_n_frames = 4
if results:
classes = map(self.GetClassFromSuspect, results[:top_n_frames])
else:
classes = map(self.GetClassFromStackFrame,
crash_stack.frames[:top_n_frames])
class_list = RankByOccurrence(classes, 1)
if class_list:
return class_list[0]
return ''
class ClassifierTest(AnalysisTestCase):
def testDefaultOccurrenceRanking(self):
self.assertEqual(DefaultOccurrenceRanking(Occurrence('c1', [0])),
(-1, 0))
self.assertEqual(DefaultOccurrenceRanking(Occurrence('c1', [0, 1])),
(-float('inf'), 0))
def testClassifyCrashStack(self):
dummy_classifier = DummyClassifier()
crash_stack = CallStack(0)
self.assertEqual(dummy_classifier.Classify([], crash_stack), '')
crash_stack = CallStack(0, frame_list=[
StackFrame(0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]),
StackFrame(1, 'src/', 'a::d(a* c)', 'f1.cc', 'src/f1.cc', [227]),
StackFrame(2, 'src/dummy', 'a::e(int)', 'f2.cc', 'src/f2.cc', [87]),
StackFrame(3, 'dummy/', 'a::g(int)', 'f3.cc', 'src/f3.cc', [87])])
self.assertEqual(dummy_classifier.Classify([], crash_stack), 'class_1')
crash_stack = CallStack(0, frame_list=[
StackFrame(0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]),
StackFrame(1, 'src/dummy', 'a::d(a* c)', 'f1.cc', 'src/f1.cc', [227]),
StackFrame(2, 'src/dummy', 'a::e(int)', 'f2.cc', 'src/f2.cc', [87])])
self.assertEqual(dummy_classifier.Classify([], crash_stack), 'class_2')
def testClassifySuspects(self):
dummy_classifier = DummyClassifier()
suspect = Suspect(self.GetDummyChangeLog(), 'src/')
suspect.file_to_stack_infos = {
'f0.cc': [(StackFrame(
0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]), 0)]
}
self.assertEqual(dummy_classifier.Classify([suspect], CallStack(0)),
'class_3')
| 33.152941 | 78 | 0.660752 |
| 794a3a53d739a3d720e829dc7df66efd2f3f14a0 | 5,979 | py | Python | plivo/resources/applications.py | burhanahmed-plivo/plivo-python | 61f86f20efb2bdd30a9ae40ed837c20af42f20b9 | ["MIT"] | 42 | 2015-01-16T07:56:16.000Z | 2021-08-20T04:45:39.000Z | plivo/resources/applications.py | burhanahmed-plivo/plivo-python | 61f86f20efb2bdd30a9ae40ed837c20af42f20b9 | ["MIT"] | 70 | 2015-01-30T04:11:04.000Z | 2022-03-29T21:04:55.000Z | plivo/resources/applications.py | burhanahmed-plivo/plivo-python | 61f86f20efb2bdd30a9ae40ed837c20af42f20b9 | ["MIT"] | 65 | 2015-04-10T22:17:57.000Z | 2021-06-06T13:09:31.000Z |
# -*- coding: utf-8 -*-
"""
Application class - along with its list class
"""
from plivo.base import (ListResponseObject, PlivoResource,
PlivoResourceInterface)
from plivo.resources.accounts import Subaccount
from plivo.utils import to_param_dict
from plivo.utils.validators import *
class Application(PlivoResource):
_name = 'Application'
_identifier_string = 'app_id'
def update(self,
answer_url,
answer_method='POST',
hangup_url=None,
hangup_method='POST',
fallback_answer_url=None,
fallback_method='POST',
message_url=None,
message_method='POST',
default_number_app=False,
default_endpoint_app=False,
subaccount=None,
log_incoming_messages=True,
public_uri=None):
params = to_param_dict(self.update, locals())
self.__dict__.update(params)
return self.client.applications.update(self.id, **params)
def delete(self, cascade=None, new_endpoint_application=None):
return self.client.applications.delete(self.id, cascade, new_endpoint_application)
def get(self):
resp = self.client.applications.get()
self.__dict__.update(resp.__dict__)
return resp
class Applications(PlivoResourceInterface):
_resource_type = Application
@validate_args(
answer_url=[is_url()],
app_name=[of_type(six.text_type)],
answer_method=[optional(of_type(six.text_type))],
hangup_url=[optional(is_url())],
hangup_method=[optional(of_type(six.text_type))],
fallback_answer_url=[optional(is_url())],
fallback_method=[optional(of_type(six.text_type))],
message_url=[optional(is_url())],
message_method=[optional(of_type(six.text_type))],
default_number_app=[optional(of_type_exact(bool))],
default_endpoint_app=[optional(of_type_exact(bool))],
subaccount=[optional(is_subaccount())],
log_incoming_messages=[optional(of_type_exact(bool))],
public_uri=[optional(of_type_exact(bool))])
def create(self,
answer_url,
app_name,
answer_method='POST',
hangup_url=None,
hangup_method='POST',
fallback_answer_url=None,
fallback_method='POST',
message_url=None,
message_method='POST',
default_number_app=False,
default_endpoint_app=False,
subaccount=None,
log_incoming_messages=True,
public_uri=None):
if subaccount:
if isinstance(subaccount, Subaccount):
subaccount = subaccount.id
return self.client.request('POST', ('Application', ), to_param_dict(self.create, locals()), is_voice_request=True)
@validate_args(app_id=[of_type(six.text_type)])
def get(self, app_id):
return self.client.request(
'GET', ('Application', app_id), response_type=Application, is_voice_request=True)
@validate_args(
subaccount=[optional(is_subaccount())],
limit=[
optional(
all_of(
of_type(*six.integer_types),
check(lambda limit: 0 < limit <= 20, '0 < limit <= 20')))
],
offset=[
optional(
all_of(
of_type(*six.integer_types),
check(lambda offset: 0 <= offset, '0 <= offset')))
])
def list(self, subaccount=None, limit=20, offset=0):
if subaccount:
if isinstance(subaccount, Subaccount):
subaccount = subaccount.id
return self.client.request(
'GET', ('Application', ),
to_param_dict(self.list, locals()),
response_type=ListResponseObject,
objects_type=Application, is_voice_request=True)
@validate_args(
answer_url=[is_url()],
app_id=[of_type(six.text_type)],
answer_method=[optional(of_type(six.text_type))],
hangup_url=[optional(is_url())],
hangup_method=[optional(of_type(six.text_type))],
fallback_answer_url=[optional(is_url())],
fallback_method=[optional(of_type(six.text_type))],
message_url=[optional(is_url())],
message_method=[optional(of_type(six.text_type))],
default_number_app=[optional(of_type_exact(bool))],
default_endpoint_app=[optional(of_type_exact(bool))],
subaccount=[optional(is_subaccount())],
log_incoming_messages=[optional(of_type_exact(bool))],
public_uri=[optional(of_type_exact(bool))])
def update(self,
app_id,
answer_url,
answer_method='POST',
hangup_url=None,
hangup_method='POST',
fallback_answer_url=None,
fallback_method='POST',
message_url=None,
message_method='POST',
default_number_app=False,
default_endpoint_app=False,
subaccount=None,
log_incoming_messages=True,
public_uri=None):
if subaccount:
if isinstance(subaccount, Subaccount):
subaccount = subaccount.id
return self.client.request('POST', ('Application', app_id),
to_param_dict(self.update, locals()), is_voice_request=True)
@validate_args(
app_id=[of_type(six.text_type)],
new_endpoint_application=[optional(of_type(six.text_type))],
cascade=[optional(of_type_exact(bool))]
)
def delete(self, app_id, cascade=None, new_endpoint_application=None):
return self.client.request('DELETE', ('Application', app_id),
to_param_dict(self.delete, locals()), is_voice_request=True)
| 38.326923 | 122 | 0.59893 |
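A hedged usage sketch against this resource interface (plivo-python v4 style; the credentials and URLs are placeholders, and I am assuming the create response exposes the new app_id):

import plivo

client = plivo.RestClient('MY_AUTH_ID', 'MY_AUTH_TOKEN')
app = client.applications.create(
    answer_url='https://example.com/answer/', app_name='demo-app')
client.applications.update(app.app_id, answer_url='https://example.com/answer2/')
client.applications.delete(app.app_id)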
| 794a3b0ca0d1f2457c7dd119487ece205f093953 | 1,126 | py | Python | logger/networking/websockets/WebsocketHandler.py | LandonPatmore/iracing-live-telemetry | f5296194fb7c7e051bce0102960d988a020a7223 | ["MIT"] | null | null | null | logger/networking/websockets/WebsocketHandler.py | LandonPatmore/iracing-live-telemetry | f5296194fb7c7e051bce0102960d988a020a7223 | ["MIT"] | null | null | null | logger/networking/websockets/WebsocketHandler.py | LandonPatmore/iracing-live-telemetry | f5296194fb7c7e051bce0102960d988a020a7223 | ["MIT"] | null | null | null |
import asyncio
from asyncio import Queue
import websockets
class WebsocketHandler:
def __init__(self, url: str, receiver_queue: Queue, streaming_queue: Queue):
self.receiver_queue: Queue = receiver_queue
self.streaming_queue: Queue = streaming_queue
self.url = url
async def consumer_handler(self, websocket):
async for message in websocket:
await self.receiver_queue.put(message)
async def producer_handler(self, websocket):
while True:
data = await self.streaming_queue.get()
await websocket.send(data)
async def handler(self, websocket):
consumer_task = asyncio.create_task(self.consumer_handler(websocket))
producer_task = asyncio.create_task(self.producer_handler(websocket))
done, pending = await asyncio.wait(
[consumer_task, producer_task],
return_when=asyncio.FIRST_COMPLETED,
)
for task in pending:
task.cancel()
async def run(self):
async with websockets.connect(self.url) as websocket:
await self.handler(websocket)
| 33.117647 | 80 | 0.672291 |
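A minimal, hypothetical driver for the class above (the URL and payload are placeholders; the queues are created by the caller and shared with whatever produces and consumes telemetry frames):

import asyncio
from asyncio import Queue

async def demo():
    receiver, streaming = Queue(), Queue()
    handler = WebsocketHandler('ws://localhost:8765', receiver, streaming)
    await streaming.put('hello')  # queued frame is sent once connected
    await handler.run()

asyncio.run(demo())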
| 794a3bd38580f98d41f6e8e373fca2465a718bab | 2,090 | py | Python | TNUCrawler.py | thviet79/MutilLanguage | 758f87ed9d0802864c2930e01e2bf014a09c7a67 | ["MIT"] | null | null | null | TNUCrawler.py | thviet79/MutilLanguage | 758f87ed9d0802864c2930e01e2bf014a09c7a67 | ["MIT"] | null | null | null | TNUCrawler.py | thviet79/MutilLanguage | 758f87ed9d0802864c2930e01e2bf014a09c7a67 | ["MIT"] | 1 | 2021-09-28T23:34:46.000Z | 2021-09-28T23:34:46.000Z |
import Punctuation
import os
import ConvertHtmlToText
import datetime
import SeparateDocumentToSentences
def extractContentNews(src_link, language):
content = ""
if (language == "zh" or language == "en"):
content = ConvertHtmlToText.getTextFromTagsWithId(src_link= src_link,tag= "div",id= "wrapper")
return content
return ConvertHtmlToText.getTextFromTagsWithId(src_link = src_link, tag= "div", id="container")
def crawlWithLanguage(language):
"""
:param language: "en", "zh"
:return: None
"""
if(language != "en" and language != 'zh'):
raise Exception("Resource not supported")
current_dir = os.path.dirname(os.path.realpath(__file__))
map_Punctuation = Punctuation.getPunctuationForLanguage(language)
resource_file = "{}/TNUCrawler/{}-{}.txt".format(current_dir,"vi",language)
Document_folder = current_dir + "/Data/crawler_success/TNU/Document/"
if not os.path.exists(Document_folder):
os.makedirs(Document_folder)
f = open(resource_file, "r",encoding="utf-8")
if not f:
raise Exception("Resource file not exist")
for line in f:
src_link, tgt_link, mutil_page = (line.split("\t"))
file_name = datetime.datetime.now().timestamp()
list_src = SeparateDocumentToSentences.slpit_text( text = extractContentNews(src_link, "vi")
,list_sign= list(map_Punctuation.keys()) )
file = open(Document_folder+"{}.vi.txt".format(file_name), "w", encoding="utf-8")
for line in list_src:
file.write("{} \n".format(line))
file.close()
list_tgt = SeparateDocumentToSentences.slpit_text( text= extractContentNews(tgt_link, "zh")
, list_sign=list(map_Punctuation.keys()))
file = open(Document_folder + "{}.{}.txt".format(file_name, language), "w", encoding="utf-8")
for line in list_tgt:
file.write("{} \n".format(line))
file.close()
f.close()
crawlWithLanguage("zh")
| 36.666667 | 102 | 0.632057 |
| 794a3c82068161562dddbee42ff4034459b47f9a | 157 | py | Python | test2/app.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | ["MIT"] | null | null | null | test2/app.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | ["MIT"] | null | null | null | test2/app.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | ["MIT"] | null | null | null |
"""
==CHANGELOG==
* currently in development
* new feature xyz
==CHANGELOG==
"""
sqdgfhsqgfksqfkjgsqfkqsgdkfsqkgfqsdf
sqgjdfjsqdhfqgskdgfkqgsdjfsqdfggdsqjf
| 15.7
| 37
| 0.802548
|
794a3d3601ff0a74d9d45c8f9c89423fb5b06b5c
| 4,878
|
py
|
Python
|
necrobot/match/matchutil.py
|
incnone/necrobot
|
e97b582b36e07001ee63f5e952230e41568f5acb
|
[
"MIT"
] | 8
|
2016-01-15T00:28:55.000Z
|
2020-02-10T21:23:11.000Z
|
necrobot/match/matchutil.py
|
incnone/necrobot
|
e97b582b36e07001ee63f5e952230e41568f5acb
|
[
"MIT"
] | 12
|
2017-01-01T22:14:54.000Z
|
2021-02-10T00:09:51.000Z
|
necrobot/match/matchutil.py
|
incnone/necrobot
|
e97b582b36e07001ee63f5e952230e41568f5acb
|
[
"MIT"
] | 18
|
2016-02-05T22:19:46.000Z
|
2020-02-12T05:11:57.000Z
|
import datetime
from typing import Optional
from necrobot.match.matchgsheetinfo import MatchGSheetInfo
from necrobot.match import matchdb
from necrobot.match.match import Match
from necrobot.match.matchinfo import MatchInfo
from necrobot.race import racedb
from necrobot.race.raceinfo import RaceInfo
match_library = {}
def invalidate_cache():
global match_library
match_library = {}
async def make_match(register=False, update=False, **kwargs) -> Optional[Match]:
# noinspection PyIncorrectDocstring
"""Create a Match object.
Parameters
----------
racer_1_id: int
The DB user ID of the first racer.
racer_2_id: int
The DB user ID of the second racer.
max_races: int
The maximum number of races this match can be. (If is_best_of is True, then the match is a best of
max_races; otherwise, the match is just repeating max_races.)
match_id: int
The DB unique ID of this match. If this parameter is specified, the return value may be None, if no match
in the database has the specified ID.
suggested_time: datetime.datetime
The time the match is suggested for. If no tzinfo, UTC is assumed.
r1_confirmed: bool
Whether the first racer has confirmed the match time.
r2_confirmed: bool
Whether the second racer has confirmed the match time.
r1_unconfirmed: bool
Whether the first racer wishes to unconfirm the match time.
r2_unconfirmed: bool
Whether the second racer wishes to unconfirm the match time.
match_info: MatchInfo
The types of races to be run in this match.
cawmentator_id: int
The DB unique ID of the cawmentator for this match.
sheet_id: int
The sheetID of the worksheet the match was created from, if any.
league_tag: str
The tag for the league this match is in, if any.
register: bool
Whether to register the match in the database.
update: bool
If match_id is given and this is True, updates the database match with any other specified parameters.
Returns
---------
Match
The created match.
"""
if 'match_id' in kwargs and kwargs['match_id'] is not None:
cached_match = await get_match_from_id(kwargs['match_id'])
if update and cached_match is not None:
cached_match.raw_update(**kwargs)
await cached_match.commit()
return cached_match
match = Match(commit_fn=matchdb.write_match, **kwargs)
await match.initialize()
if register:
await match.commit()
match_library[match.match_id] = match
return match
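# A hedged usage sketch (not part of the original module); the racer IDs
# and race count are placeholder values:
#
#     match = await make_match(
#         racer_1_id=101,
#         racer_2_id=102,
#         max_races=3,
#         register=True,
#     )
#     # register=True commits the match and caches it in match_library.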
async def get_match_from_id(match_id: int) -> Optional[Match]:
"""Get a match object from its DB unique ID.
Parameters
----------
match_id: int
        The database ID of the match.
Returns
-------
Optional[Match]
The match found, if any.
"""
if match_id is None:
return None
if match_id in match_library:
return match_library[match_id]
raw_data = await matchdb.get_raw_match_data(match_id)
if raw_data is not None:
return await make_match_from_raw_db_data(raw_data)
else:
return None
async def delete_match(match_id: int) -> None:
await matchdb.delete_match(match_id=match_id)
if match_id in match_library:
del match_library[match_id]
async def make_match_from_raw_db_data(row: list) -> Match:
match_id = int(row[0])
if match_id in match_library:
return match_library[match_id]
match_info = MatchInfo(
race_info=await racedb.get_race_info_from_type_id(int(row[1])) if row[1] is not None else RaceInfo(),
ranked=bool(row[9]),
is_best_of=bool(row[10]),
max_races=int(row[11])
)
sheet_info = MatchGSheetInfo()
sheet_info.wks_id = row[14]
sheet_info.row = row[15]
new_match = Match(
commit_fn=matchdb.write_match,
match_id=match_id,
match_info=match_info,
racer_1_id=int(row[2]),
racer_2_id=int(row[3]),
suggested_time=row[4],
finish_time=row[16],
r1_confirmed=bool(row[5]),
r2_confirmed=bool(row[6]),
r1_unconfirmed=bool(row[7]),
r2_unconfirmed=bool(row[8]),
cawmentator_id=row[12],
channel_id=int(row[13]) if row[13] is not None else None,
gsheet_info=sheet_info,
autogenned=bool(row[17]),
league_tag=row[18]
)
await new_match.initialize()
match_library[new_match.match_id] = new_match
return new_match
async def get_race_data(match: Match):
return await matchdb.get_match_race_data(match.match_id)
async def match_exists_between(racer_1, racer_2) -> bool:
prior_match_ids = await matchdb.get_matches_between(racer_1.user_id, racer_2.user_id)
return bool(prior_match_ids)
| 31.070064
| 113
| 0.676917
|
794a3d872db93bc02608500f1904eac33f346ad7
| 18,987
|
py
|
Python
|
tests/python/contrib/test_cudnn.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
tests/python/contrib/test_cudnn.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863
|
2017-08-17T19:55:50.000Z
|
2019-11-04T17:18:41.000Z
|
tests/python/contrib/test_cudnn.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.contrib import cudnn
from tvm.contrib.nvcc import have_fp16
from tvm.contrib import graph_executor
import numpy as np
import tvm.topi.testing
import tvm.testing
from tvm.relay.op.contrib.cudnn import partition_for_cudnn
requires_cudnn = pytest.mark.skipif(
tvm.get_global_func("tvm.contrib.cudnn.conv2d.forward", True) is None,
reason="CuDNN is not enabled",
)
def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):
in_channel = 4
out_channel = 16
filter_h = 3
filter_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
dilation_h = 1
dilation_w = 1
batch = 3
height = 32
width = 32
if data_dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
# schedule
if tensor_format == 0:
xshape = [batch, in_channel, height, width]
wshape = [out_channel, in_channel // groups, filter_h, filter_w]
else:
xshape = [batch, height, width, in_channel]
wshape = [out_channel, filter_h, filter_w, in_channel // groups]
X = te.placeholder(xshape, name="X", dtype=data_dtype)
W = te.placeholder(wshape, name="W", dtype=data_dtype)
Y = cudnn.conv_forward(
X,
W,
[pad_h, pad_w],
[stride_h, stride_w],
[dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
algo=-1,
groups=groups,
)
yshape = [x.value for x in Y.shape]
s = te.create_schedule(Y.op)
# validation
dev = tvm.cuda(0)
f = tvm.build(s, [X, W, Y], "cuda --host=llvm", name="conv2d")
x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
y_np = np.zeros(yshape).astype(data_dtype)
x = tvm.nd.array(x_np, dev)
w = tvm.nd.array(w_np, dev)
y = tvm.nd.array(y_np, dev)
if tensor_format == 0:
c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
elif tensor_format == 1:
wt = w_np.transpose((1, 2, 3, 0)) # OHWI => HWIO
c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
f(x, w, y)
tvm.testing.assert_allclose(y.numpy(), c_np, atol=1e-2, rtol=1e-2)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d():
verify_conv2d("float32", "float32", tensor_format=0)
verify_conv2d("float16", "float32", tensor_format=1)
verify_conv2d("float16", "float16", tensor_format=0)
verify_conv2d("float16", "float16", tensor_format=1)
verify_conv2d("int8", "int32", tensor_format=1)
verify_conv2d("float32", "float32", tensor_format=0, groups=2)
verify_conv2d("float16", "float32", tensor_format=1, groups=2)
verify_conv2d("float16", "float16", tensor_format=0, groups=2)
verify_conv2d("int8", "int32", tensor_format=1, groups=2)
def verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1):
in_channel = 4
out_channel = 16
filter_d = 3
filter_h = 3
filter_w = 3
pad_d = 1
pad_h = 1
pad_w = 1
stride_d = 1
stride_h = 1
stride_w = 1
dilation_d = 1
dilation_h = 1
dilation_w = 1
batch = 3
depth = 32
height = 32
width = 32
# schedule
xshape = [batch, in_channel, depth, height, width]
wshape = [out_channel, in_channel // groups, filter_d, filter_h, filter_w]
X = te.placeholder(xshape, name="X", dtype=data_dtype)
W = te.placeholder(wshape, name="W", dtype=data_dtype)
Y = cudnn.conv_forward(
X,
W,
[pad_d, pad_h, pad_w],
[stride_d, stride_h, stride_w],
[dilation_d, dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
algo=-1,
conv_dtype=conv_dtype,
groups=groups,
)
yshape = [x.value for x in Y.shape]
s = te.create_schedule(Y.op)
# validation
dev = tvm.cuda(0)
f = tvm.build(s, [X, W, Y], target="cuda --host=llvm", name="conv3d")
x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
y_np = np.zeros(yshape).astype(data_dtype)
x = tvm.nd.array(x_np, dev)
w = tvm.nd.array(w_np, dev)
y = tvm.nd.array(y_np, dev)
if tensor_format == 0:
c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
else:
        raise AssertionError("For now, conv3d only supports tensor_format 0 (NCDHW)")
f(x, w, y)
tvm.testing.assert_allclose(y.numpy(), c_np, atol=3e-5, rtol=1e-4)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv3d():
verify_conv3d("float32", "float32", tensor_format=0)
verify_conv3d("float32", "float32", tensor_format=0, groups=2)
def verify_softmax(shape, axis, dtype="float32", log_softmax=False):
cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax
testing_op = (
tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
)
A = te.placeholder(shape, dtype=dtype, name="A")
B = cudnn_op(A, axis)
s = te.create_schedule([B.op])
dev = tvm.cuda(0)
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = testing_op(a_np)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
def verify_softmax_4d(shape, dtype="float32", log_softmax=False):
cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax
testing_op = (
tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
)
A = te.placeholder(shape, dtype=dtype, name="A")
B = cudnn_op(A, axis=1)
s = te.create_schedule([B.op])
dev = tvm.cuda(0)
n, c, h, w = shape
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = testing_op(a_np.transpose(0, 2, 3, 1).reshape(h * w, c))
b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@requires_cudnn
def test_softmax():
verify_softmax((32, 10), -1)
verify_softmax((3, 4), -1)
verify_softmax((1, 5), -1, "float64")
verify_softmax_4d((1, 16, 256, 256))
verify_softmax_4d((1, 16, 256, 256), "float64")
verify_softmax((32, 10), -1, log_softmax=True)
verify_softmax((3, 4), -1, log_softmax=True)
verify_softmax((1, 5), -1, "float64", log_softmax=True)
verify_softmax_4d((1, 16, 256, 256), log_softmax=True)
verify_softmax_4d((1, 16, 256, 256), "float64", log_softmax=True)
def verify_conv2d_backward_data(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):
batch = 3
in_channel = 4
out_channel = 16
filter_h, filter_w = 3, 3
pad_h, pad_w = 1, 1
stride_h, stride_w = 1, 1
height, width = 32, 32
if tensor_format == 0:
xshape = [batch, in_channel, height, width]
wshape = [out_channel, in_channel, filter_h, filter_w]
oshape = xshape
oshape[1] = out_channel
ref_func = tvm.topi.testing.conv2d_transpose_nchw_python
else:
xshape = [batch, height, width, in_channel]
wshape = [out_channel, filter_h, filter_w, in_channel]
oshape = xshape
oshape[3] = out_channel
ref_func = lambda dy_np, w_np, strides, padding, out_pad: tvm.topi.testing.conv2d_transpose_nhwc_python(
dy_np, np.transpose(w_np, [1, 2, 3, 0]), "HWOI", strides, padding, out_pad
)
dy_np = np.random.uniform(-1, 1, oshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
if data_dtype == "float16":
dx_np = ref_func(
dy_np.astype("float32"),
w_np.astype("float32"),
(stride_h, stride_w),
(pad_h, pad_w),
(0, 0),
)
dx_np = dx_np.astype("float16")
else:
dx_np = ref_func(dy_np, w_np, (stride_h, stride_w), (pad_h, pad_w), (0, 0))
dy = te.placeholder(oshape, name="dy", dtype=data_dtype)
w = te.placeholder(wshape, name="dw", dtype=data_dtype)
dx = cudnn.conv_backward_data(
dy,
w,
[pad_h, pad_w],
[stride_h, stride_w],
[1, 1],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
groups=1,
)
s = te.create_schedule(dx.op)
dev = tvm.cuda(0)
f = tvm.build(s, [dy, w, dx], "cuda --host=llvm", name="conv2d_backward_data")
dy = tvm.nd.array(dy_np, dev)
w = tvm.nd.array(w_np, dev)
dx = tvm.nd.array(dx_np, dev)
f(dy, w, dx)
tvm.testing.assert_allclose(dx.numpy(), dx_np, atol=tol, rtol=tol)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d_backward_data():
verify_conv2d_backward_data("float32", "float32", tensor_format=0, tol=1e-5)
verify_conv2d_backward_data("float32", "float32", tensor_format=1, tol=1e-2)
# The scipy convolve function does not support fp16, so the reference will be computed with
# fp32. Use larger tolerance to be on the safe side (1e-2 also seems mostly ok).
verify_conv2d_backward_data("float16", "float16", tensor_format=1, tol=1e-1)
def verify_conv2d_backward_filter(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):
batch = 3
in_channel = 4
out_channel = 16
filter_h, filter_w = 3, 3
pad_h, pad_w = 1, 1
stride_h, stride_w = 1, 1
height, width = 32, 32
if tensor_format == 0:
x_shape = [batch, in_channel, height, width]
dy_shape = [batch, out_channel, height, width]
else:
x_shape = [batch, height, width, in_channel]
dy_shape = [batch, height, width, out_channel]
x_np = np.random.uniform(-1, 1, x_shape).astype(data_dtype)
dy_np = np.random.uniform(-1, 1, dy_shape).astype(data_dtype)
dw_np = tvm.topi.testing.conv2d_backward_weight_python(
dy_np,
x_np,
(filter_h, filter_w),
(stride_h, stride_w),
(pad_h, pad_w),
"NCHW" if tensor_format == 0 else "NHWC",
)
x = te.placeholder(x_shape, name="x", dtype=data_dtype)
dy = te.placeholder(dy_shape, name="dy", dtype=data_dtype)
dw = cudnn.conv_backward_filter(
dy,
x,
(filter_h, filter_w),
[pad_h, pad_w],
[stride_h, stride_w],
[1, 1],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
)
s = te.create_schedule(dw.op)
dev = tvm.cuda(0)
f = tvm.build(s, [dy, x, dw], "cuda --host=llvm", name="conv2d_backward_filter")
x = tvm.nd.array(x_np, dev)
dy = tvm.nd.array(dy_np, dev)
dw = tvm.nd.array(dw_np, dev)
f(dy, x, dw)
tvm.testing.assert_allclose(dw.numpy(), dw_np, atol=tol, rtol=tol)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d_backward_filter():
verify_conv2d_backward_filter("float32", "float32", tensor_format=0, tol=1e-2)
verify_conv2d_backward_filter("float32", "float32", tensor_format=1, tol=1e-2)
test_kwargs_default_2d = {
"tensor_format": 0,
"pad": [1, 1],
"stride": [1, 1],
"dilation": [1, 1],
"x_shape": [16, 4, 32, 32],
"w_shape": [8, 4, 3, 3],
"groups": 1,
"conv_dtype": "float32",
"data_dtype": "float32",
}
test_kwargs_default_3d = {
"tensor_format": 0,
"pad": [1, 1, 1],
"stride": [1, 1, 1],
"dilation": [1, 1, 1],
"x_shape": [16, 4, 32, 32, 32],
"w_shape": [8, 4, 3, 3, 3],
"groups": 1,
"conv_dtype": "float32",
"data_dtype": "float32",
}
conv_output_shape_conditions = {
"2d_small": test_kwargs_default_2d,
"2d_large": {
**test_kwargs_default_2d,
"x_shape": [16, 32, 512, 1024],
"w_shape": [8, 32, 5, 5],
},
"2d_pad": {**test_kwargs_default_2d, "pad": [2, 3]},
"2d_stride": {**test_kwargs_default_2d, "stride": [2, 3]},
"2d_dilation": {**test_kwargs_default_2d, "dilation": [2, 3]},
"2d_groups": {**test_kwargs_default_2d, "groups": 4, "w_shape": [8, 1, 3, 3]},
"2d_NHWC": {
**test_kwargs_default_2d,
"tensor_format": 1,
"x_shape": [16, 32, 32, 4],
"w_shape": [8, 3, 3, 4],
},
"2d_NCHW_VECT_C": {
**test_kwargs_default_2d,
"tensor_format": 2,
"w_shape": [8, 16, 3, 3],
"data_dtype": "int8x4",
},
"3d_small": test_kwargs_default_3d,
"3d_large": {
**test_kwargs_default_3d,
"x_shape": [16, 32, 64, 128, 256],
"w_shape": [8, 32, 5, 5, 5],
},
"3d_pad": {**test_kwargs_default_3d, "pad": [2, 3, 4]},
"3d_stride": {**test_kwargs_default_3d, "stride": [2, 3, 4]},
"3d_dilation": {**test_kwargs_default_3d, "dilation": [2, 3, 4]},
"3d_groups": {**test_kwargs_default_3d, "groups": 4, "w_shape": [8, 1, 3, 3, 3]},
"3d_NCHW_VECT_C": {
**test_kwargs_default_3d,
"tensor_format": 2,
"w_shape": [8, 16, 3, 3, 3],
"data_dtype": "int8x4",
},
}
@pytest.fixture(
params=[pytest.param(kwargs, id=name) for name, kwargs in conv_output_shape_conditions.items()]
)
def conv_output_shape_kwargs(request):
return request.param
def _verify_cudnn_relay(expr):
np.random.seed(42)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
func = mod["main"]
cudnn_mod = partition_for_cudnn(mod)
assert len(cudnn_mod.get_global_vars()) == 2
input_data = []
for param in func.params:
shape = [int(x) for x in param.checked_type.shape]
input_data.append(
(
param.name_hint,
np.random.uniform(-32, 32, size=shape).astype(param.checked_type.dtype),
)
)
cuda_config = (tvm.target.cuda(), tvm.cuda(), cudnn_mod)
cpu_config = (tvm.target.Target("llvm"), tvm.cpu(), mod)
outputs = []
for target, dev, test_mod in [cuda_config, cpu_config]:
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(test_mod, target=target, target_host=cpu_config[0])
module = graph_executor.GraphModule(lib["default"](dev))
for name, data in input_data:
module.set_input(name, tvm.nd.array(data, dev))
module.run()
out_type = func.body.checked_type
outputs.append(
module.get_output(0, tvm.nd.empty(out_type.shape, dtype=out_type.dtype)).numpy()
)
tvm.testing.assert_allclose(
outputs[0],
outputs[1],
rtol=1e-3,
atol=30,
)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"shape,axis",
[
((200,), 0),
((13, 27), 0),
((44, 12, 67), 1),
((1, 16, 16, 8), 2),
((2, 4, 6, 8, 10), 3),
],
)
@pytest.mark.parametrize(
"dtype",
[
"float32",
"float16",
"float64",
],
)
def test_relay_cudnn_softmax(shape, axis, dtype):
x = tvm.relay.var("x", tvm.relay.TensorType(shape, dtype))
softmax = relay.op.nn.softmax(x, axis=axis)
_verify_cudnn_relay(softmax)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"shape,axis",
[
((32, 16), -1),
((13, 27), 1),
],
)
@pytest.mark.parametrize(
"dtype",
[
"float32",
"float16",
"float64",
],
)
def test_relay_cudnn_log_softmax(shape, axis, dtype):
x = tvm.relay.var("x", tvm.relay.TensorType(shape, dtype))
log_softmax = relay.op.nn.log_softmax(x, axis=axis)
_verify_cudnn_relay(log_softmax)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,h,w,ci,co,groups",
[
(1, 16, 20, 8, 16, 1),
(10, 17, 19, 16, 8, 4),
],
)
@pytest.mark.parametrize(
"kh,kw,padding",
[
(1, 1, (3, 1, 3, 1)),
(3, 3, (1, 2)),
(7, 2, (0, 0)),
],
)
@pytest.mark.parametrize(
"strides,dilation,dtype",
[
((1, 1), (1, 1), "float32"),
((2, 1), (2, 2), "float16"),
((3, 3), (1, 2), "float64"),
],
)
def test_relay_cudnn_conv2d(n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype):
data = tvm.relay.var("data", tvm.relay.TensorType((n, ci, h, w), dtype))
weight = tvm.relay.var("weight", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))
conv2d = relay.op.nn.conv2d(
data,
weight,
groups=groups,
channels=co,
kernel_size=(kh, kw),
strides=strides,
dilation=dilation,
padding=padding,
data_layout="NCHW",
kernel_layout="OIHW",
)
_verify_cudnn_relay(conv2d)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,h,w,ci,co,groups",
[
(1, 16, 20, 8, 16, 1),
(10, 17, 19, 16, 8, 4),
],
)
@pytest.mark.parametrize(
"kh,kw,padding,strides,dilation,dtype",
[
(1, 1, (3, 1, 3, 1), (1, 1), (1, 1), "float32"),
(3, 3, (1, 2), (2, 1), (2, 2), "float16"),
(7, 2, (0, 0), (3, 3), (1, 2), "float64"),
],
)
@pytest.mark.parametrize("activation", [True, False])
def test_relay_cudnn_conv2d_bias_act(
n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype, activation
):
data = tvm.relay.var("data", tvm.relay.TensorType((n, ci, h, w), dtype))
weight = tvm.relay.var("weight", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))
bias = relay.var("bias", relay.TensorType((co,), dtype))
conv2d = relay.op.nn.conv2d(
data,
weight,
groups=groups,
channels=co,
kernel_size=(kh, kw),
strides=strides,
dilation=dilation,
padding=padding,
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.op.nn.bias_add(conv2d, bias)
if activation:
out = relay.op.nn.relu(out)
_verify_cudnn_relay(out)
if __name__ == "__main__":
tvm.testing.main()
| 30.234076
| 112
| 0.612998
|
794a3df2463e2c75328187fb456614a72b083fa3
| 3,917
|
py
|
Python
|
Codes/Python32/Lib/importlib/test/source/test_source_encoding.py
|
eyantra/FireBird_Swiss_Knife
|
cac322cf28e2d690b86ba28a75e87551e5e47988
|
[
"MIT"
] | 319
|
2016-09-22T15:54:48.000Z
|
2022-03-18T02:36:58.000Z
|
Codes/Python32/Lib/importlib/test/source/test_source_encoding.py
|
eyantra/FireBird_Swiss_Knife
|
cac322cf28e2d690b86ba28a75e87551e5e47988
|
[
"MIT"
] | 9
|
2016-11-03T21:56:41.000Z
|
2020-08-09T19:27:37.000Z
|
Codes/Python32/Lib/importlib/test/source/test_source_encoding.py
|
eyantra/FireBird_Swiss_Knife
|
cac322cf28e2d690b86ba28a75e87551e5e47988
|
[
"MIT"
] | 27
|
2016-10-06T16:05:32.000Z
|
2022-03-18T02:37:00.000Z
|
from importlib import _bootstrap
from . import util as source_util
import codecs
import re
import sys
# Because sys.path gets essentially blanked, need to have unicodedata already
# imported for the parser to use.
import unicodedata
import unittest
CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
class EncodingTest(unittest.TestCase):
"""PEP 3120 makes UTF-8 the default encoding for source code
[default encoding].
PEP 263 specifies how that can change on a per-file basis. Either the first
or second line can contain the encoding line [encoding first line]
    [encoding second line]. If the file has the BOM marker it is considered UTF-8
implicitly [BOM]. If any encoding is specified it must be UTF-8, else it is
an error [BOM and utf-8][BOM conflict].
"""
variable = '\u00fc'
character = '\u00c9'
source_line = "{0} = '{1}'\n".format(variable, character)
module_name = '_temp'
def run_test(self, source):
with source_util.create_modules(self.module_name) as mapping:
with open(mapping[self.module_name], 'wb') as file:
file.write(source)
loader = _bootstrap._SourceFileLoader(self.module_name,
mapping[self.module_name])
return loader.load_module(self.module_name)
def create_source(self, encoding):
encoding_line = "# coding={0}".format(encoding)
assert CODING_RE.search(encoding_line)
source_lines = [encoding_line.encode('utf-8')]
source_lines.append(self.source_line.encode(encoding))
return b'\n'.join(source_lines)
def test_non_obvious_encoding(self):
# Make sure that an encoding that has never been a standard one for
# Python works.
encoding_line = "# coding=koi8-r"
assert CODING_RE.search(encoding_line)
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
self.run_test(source)
# [default encoding]
def test_default_encoding(self):
self.run_test(self.source_line.encode('utf-8'))
# [encoding first line]
def test_encoding_on_first_line(self):
encoding = 'Latin-1'
source = self.create_source(encoding)
self.run_test(source)
# [encoding second line]
def test_encoding_on_second_line(self):
source = b"#/usr/bin/python\n" + self.create_source('Latin-1')
self.run_test(source)
# [BOM]
def test_bom(self):
self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))
# [BOM and utf-8]
def test_bom_and_utf_8(self):
source = codecs.BOM_UTF8 + self.create_source('utf-8')
self.run_test(source)
# [BOM conflict]
def test_bom_conflict(self):
source = codecs.BOM_UTF8 + self.create_source('latin-1')
with self.assertRaises(SyntaxError):
self.run_test(source)
class LineEndingTest(unittest.TestCase):
r"""Source written with the three types of line endings (\n, \r\n, \r)
need to be readable [cr][crlf][lf]."""
def run_test(self, line_ending):
module_name = '_temp'
source_lines = [b"a = 42", b"b = -13", b'']
source = line_ending.join(source_lines)
with source_util.create_modules(module_name) as mapping:
with open(mapping[module_name], 'wb') as file:
file.write(source)
loader = _bootstrap._SourceFileLoader(module_name,
mapping[module_name])
return loader.load_module(module_name)
# [cr]
def test_cr(self):
self.run_test(b'\r')
# [crlf]
def test_crlf(self):
self.run_test(b'\r\n')
# [lf]
def test_lf(self):
self.run_test(b'\n')
def test_main():
from test.support import run_unittest
run_unittest(EncodingTest, LineEndingTest)
if __name__ == '__main__':
test_main()
| 31.58871
| 80
| 0.642584
|
794a3e24600cba3f59bb737a8852209e375ed193
| 18
|
py
|
Python
|
bioviz/__init__.py
|
BioWiz/msa
|
634a99b2a36393dbec75ff008997de0ebd6cb2cb
|
[
"BSD-3-Clause"
] | 1
|
2021-04-01T05:50:44.000Z
|
2021-04-01T05:50:44.000Z
|
bioviz/__init__.py
|
BioWiz/msa
|
634a99b2a36393dbec75ff008997de0ebd6cb2cb
|
[
"BSD-3-Clause"
] | null | null | null |
bioviz/__init__.py
|
BioWiz/msa
|
634a99b2a36393dbec75ff008997de0ebd6cb2cb
|
[
"BSD-3-Clause"
] | null | null | null |
__all__ = ["msa"]
| 9
| 17
| 0.555556
|
794a402bcece8fef79bbcdff5187f9e52bfa3fc1
| 3,113
|
py
|
Python
|
pinax/apps/blog/templatetags/switchcase.py
|
ericholscher/pinax
|
6ba4585671c6a3d9ac154441296f8a403453469f
|
[
"MIT"
] | 1
|
2015-11-08T11:32:53.000Z
|
2015-11-08T11:32:53.000Z
|
pinax/apps/blog/templatetags/switchcase.py
|
ericholscher/pinax
|
6ba4585671c6a3d9ac154441296f8a403453469f
|
[
"MIT"
] | null | null | null |
pinax/apps/blog/templatetags/switchcase.py
|
ericholscher/pinax
|
6ba4585671c6a3d9ac154441296f8a403453469f
|
[
"MIT"
] | null | null | null |
"""
Simplistic switch/case tag for Django.
Usage::
{% load switchcase %}
{% switch meal %}
{% case "spam" %}...{% endcase %}
{% case "eggs" %}...{% endcase %}
{% endswitch %}
"""
from django import template
register = template.Library()
@register.tag
def switch(parser, token):
"""
Switch tag. Usage::
{% switch meal %}
{% case "spam" %}...{% endcase %}
{% case "eggs" %}...{% endcase %}
{% endswitch %}
Note that ``{% case %}`` arguments can be variables if you like (as can
    switch arguments, but that's a bit silly).
"""
# Parse out the arguments.
args = token.split_contents()
if len(args) != 2:
        raise template.TemplateSyntaxError("%s tag takes exactly one argument." % args[0])
# Pull out all the children of the switch tag (until {% endswitch %}).
childnodes = parser.parse(("endswitch",))
# Remove the {% endswitch %} node so it doesn't get parsed twice.
parser.delete_first_token()
# We just care about case children; all other direct children get ignored.
casenodes = childnodes.get_nodes_by_type(CaseNode)
return SwitchNode(args[1], casenodes)
@register.tag
def case(parser, token):
"""
Case tag. Used only inside ``{% switch %}`` tags, so see above for those docs.
"""
args = token.split_contents()
assert len(args) == 2
# Same dance as above, except this time we care about all the child nodes
children = parser.parse(("endcase",))
parser.delete_first_token()
return CaseNode(args[1], children)
class SwitchNode(template.Node):
def __init__(self, value, cases):
self.value = value
self.cases = cases
def render(self, context):
# Resolve the value; if it's a non-existant variable don't even bother
# checking the values of the cases since they'll never match.
try:
value = template.resolve_variable(self.value, context)
        except template.VariableDoesNotExist:
return ""
# Check each case, and if it matches return the rendered content
# of that case (short-circuit).
for case in self.cases:
if case.equals(value, context):
return case.render(context)
# No matches; render nothing.
return ""
class CaseNode(template.Node):
def __init__(self, value, childnodes):
self.value = value
self.childnodes = childnodes
def equals(self, otherval, context):
"""
Check to see if this case's value equals some other value. This is
called from ``SwitchNode.render()``, above.
"""
try:
return template.resolve_variable(self.value, context) == otherval
        except template.VariableDoesNotExist:
# If the variable doesn't exist, it doesn't equal anything.
return False
def render(self, context):
"""Render this particular case, which means rendering its child nodes."""
return self.childnodes.render(context)
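# A hedged usage sketch (not part of the original module); assumes the tag
# library is registered on an app's templatetags path as "switchcase":
#
#     from django.template import Context, Template
#     t = Template(
#         '{% load switchcase %}'
#         '{% switch meal %}'
#         '{% case "spam" %}Spam!{% endcase %}'
#         '{% case "eggs" %}Eggs!{% endcase %}'
#         '{% endswitch %}'
#     )
#     t.render(Context({"meal": "spam"}))  # -> "Spam!"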
| 31.444444
| 88
| 0.60167
|
794a408ee5ed3aa8c5d8ef9335e90eb681836837
| 14,581
|
py
|
Python
|
cpovc_ovc/migrations/0001_initial.py
|
TimzOwen/cpims-ovc-3.0
|
41e65175e8a72b2a6bd61555ecc97e45409f5170
|
[
"Apache-2.0"
] | 2
|
2022-02-26T14:04:40.000Z
|
2022-03-23T17:33:32.000Z
|
cpovc_ovc/migrations/0001_initial.py
|
TimzOwen/cpims-ovc-3.0
|
41e65175e8a72b2a6bd61555ecc97e45409f5170
|
[
"Apache-2.0"
] | null | null | null |
cpovc_ovc/migrations/0001_initial.py
|
TimzOwen/cpims-ovc-3.0
|
41e65175e8a72b2a6bd61555ecc97e45409f5170
|
[
"Apache-2.0"
] | 19
|
2022-02-26T13:44:58.000Z
|
2022-03-26T17:20:22.000Z
|
# Generated by Django 4.0.2 on 2022-04-25 12:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('cpovc_main', '0002_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cpovc_registry', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OVCAggregate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('indicator_name', models.CharField(max_length=100)),
('project_year', models.IntegerField()),
('reporting_period', models.CharField(max_length=50)),
('cbo', models.CharField(max_length=255)),
('subcounty', models.CharField(max_length=100)),
('county', models.CharField(max_length=100)),
('ward', models.CharField(max_length=100)),
('implementing_partnerid', models.IntegerField()),
('implementing_partner', models.CharField(max_length=200)),
('indicator_count', models.IntegerField()),
('age', models.IntegerField()),
('gender', models.CharField(max_length=50)),
('county_active', models.IntegerField()),
('subcounty_active', models.IntegerField()),
('ward_active', models.IntegerField()),
('timestamp_created', models.DateTimeField(null=True)),
('timestamp_updated', models.DateTimeField(auto_now=True, null=True)),
],
options={
'verbose_name': 'OVC aggregate data',
'verbose_name_plural': 'OVC aggregate data',
'db_table': 'ovc_aggregate',
},
),
migrations.CreateModel(
name='OVCCluster',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('cluster_name', models.CharField(max_length=150)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'OVC Cluster',
'verbose_name_plural': 'OVC Clusters',
'db_table': 'ovc_cluster',
},
),
migrations.CreateModel(
name='OVCFacility',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facility_code', models.CharField(max_length=10, null=True)),
('facility_name', models.CharField(max_length=200)),
('is_void', models.BooleanField(default=False)),
('sub_county', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cpovc_main.setupgeography')),
],
options={
'verbose_name': 'OVC Facility',
'verbose_name_plural': 'OVC Facilities',
'db_table': 'ovc_facility',
},
),
migrations.CreateModel(
name='OVCUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('implementing_partnerid', models.IntegerField()),
('project_year', models.IntegerField()),
('reporting_period', models.CharField(max_length=50)),
('ovc_filename', models.CharField(max_length=255)),
('created_at', models.DateField(default=django.utils.timezone.now, null=True)),
],
options={
'verbose_name': 'OVC upload data',
'verbose_name_plural': 'OVC upload data',
'db_table': 'ovc_upload',
},
),
migrations.CreateModel(
name='OVCViralload',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('viral_load', models.IntegerField(null=True)),
('viral_date', models.DateField(null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Viral Load',
'verbose_name_plural': 'OVC Viral Loads',
'db_table': 'ovc_viral_load',
},
),
migrations.CreateModel(
name='OVCSchool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school_level', models.CharField(choices=[('SLEC', 'ECD'), ('SLPR', 'Primary'), ('SLSE', 'Secondary'), ('SLUN', 'University'), ('SLTV', 'Tertiary / Vocational')], default='1', max_length=5)),
('school_name', models.CharField(max_length=200)),
('is_void', models.BooleanField(default=False)),
('sub_county', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_main.setupgeography')),
],
options={
'verbose_name': 'OVC school',
'verbose_name_plural': 'OVC Schools',
'db_table': 'ovc_school',
},
),
migrations.CreateModel(
name='OVCRegistration',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('registration_date', models.DateField(default=django.utils.timezone.now)),
('has_bcert', models.BooleanField(default=False)),
('is_disabled', models.BooleanField(default=False)),
('hiv_status', models.CharField(max_length=4, null=True)),
('art_status', models.CharField(max_length=4, null=True)),
('school_level', models.CharField(max_length=4, null=True)),
('immunization_status', models.CharField(max_length=4, null=True)),
('org_unique_id', models.CharField(max_length=15, null=True)),
('exit_reason', models.CharField(max_length=4, null=True)),
('exit_date', models.DateField(default=django.utils.timezone.now, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_active', models.BooleanField(default=True)),
('is_void', models.BooleanField(default=False)),
('caretaker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ctaker', to='cpovc_registry.regperson')),
('child_cbo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regorgunit')),
('child_chv', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='chv', to='cpovc_registry.regperson')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Registration',
'verbose_name_plural': 'OVC Registration',
'db_table': 'ovc_registration',
},
),
migrations.CreateModel(
name='OVCHouseHold',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('head_identifier', models.CharField(max_length=255)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('head_person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Registration',
'verbose_name_plural': 'OVC Registration',
'db_table': 'ovc_household',
},
),
migrations.CreateModel(
name='OVCHHMembers',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('hh_head', models.BooleanField(default=False)),
('member_type', models.CharField(max_length=4)),
('member_alive', models.CharField(default='AYES', max_length=4)),
('death_cause', models.CharField(max_length=4, null=True)),
('hiv_status', models.CharField(max_length=4, null=True)),
('date_linked', models.DateField(default=django.utils.timezone.now)),
('date_delinked', models.DateField(null=True)),
('is_void', models.BooleanField(default=False)),
('house_hold', models.ForeignKey(default=uuid.uuid4, on_delete=django.db.models.deletion.CASCADE, to='cpovc_ovc.ovchousehold')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Registration',
'verbose_name_plural': 'OVC Registration',
'db_table': 'ovc_household_members',
},
),
migrations.CreateModel(
name='OVCHealth',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('art_status', models.CharField(max_length=4)),
('date_linked', models.DateField()),
('ccc_number', models.CharField(max_length=20)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('timestamp_updated', models.DateTimeField(auto_now=True, null=True)),
('is_void', models.BooleanField(default=False)),
('facility', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_ovc.ovcfacility')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Care Health',
'verbose_name_plural': 'OVC Care Health',
'db_table': 'ovc_care_health',
},
),
migrations.CreateModel(
name='OVCExit',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('org_unit_name', models.CharField(max_length=150, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('org_unit', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regorgunit')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Exit Org Unit',
'verbose_name_plural': 'OVC Exit Org Units',
'db_table': 'ovc_exit_organization',
},
),
migrations.CreateModel(
name='OVCEligibility',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('criteria', models.CharField(max_length=5)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
],
options={
'verbose_name': 'OVC Eligibility',
'verbose_name_plural': 'OVC Eligibility',
'db_table': 'ovc_eligibility',
},
),
migrations.CreateModel(
name='OVCEducation',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('school_level', models.CharField(max_length=4)),
('school_class', models.CharField(max_length=4)),
('admission_type', models.CharField(max_length=4)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regperson')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_ovc.ovcschool')),
],
options={
'verbose_name': 'OVC Care Education',
'verbose_name_plural': 'OVC Care Education',
'db_table': 'ovc_care_education',
},
),
migrations.CreateModel(
name='OVCClusterCBO',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('added_at', models.DateTimeField(default=django.utils.timezone.now)),
('is_void', models.BooleanField(default=False)),
('cbo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_registry.regorgunit')),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cpovc_ovc.ovccluster')),
],
options={
'verbose_name': 'OVC Cluster CBO',
'verbose_name_plural': 'OVC Cluster CBOs',
'db_table': 'ovc_cluster_cbo',
},
),
]
| 53.215328
| 208
| 0.573898
|
794a408eeac7b869380c11a21da3e0f4f05bfb54
| 13,930
|
py
|
Python
|
pytests/bucket_collections/collections_base.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
pytests/bucket_collections/collections_base.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
pytests/bucket_collections/collections_base.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
from math import ceil
from Cb_constants import CbServer, DocLoading
from basetestcase import ClusterSetup
from collections_helper.collections_spec_constants import \
MetaConstants, MetaCrudParams
from couchbase_helper.durability_helper import DurabilityHelper
from membase.api.rest_client import RestConnection
from BucketLib.BucketOperations import BucketHelper
from BucketLib.bucket import Bucket
from remote.remote_util import RemoteMachineShellConnection
from cb_tools.cbstats import Cbstats
from java.lang import Exception as Java_base_exception
class CollectionBase(ClusterSetup):
def setUp(self):
super(CollectionBase, self).setUp()
self.log_setup_status("CollectionBase", "started")
self.MAX_SCOPES = CbServer.max_scopes
self.MAX_COLLECTIONS = CbServer.max_collections
self.key = 'test_collection'.rjust(self.key_size, '0')
self.simulate_error = self.input.param("simulate_error", None)
self.error_type = self.input.param("error_type", "memory")
self.doc_ops = self.input.param("doc_ops", None)
self.spec_name = self.input.param("bucket_spec",
"single_bucket.default")
self.data_spec_name = self.input.param("data_spec_name",
"initial_load")
self.remove_default_collection = \
self.input.param("remove_default_collection", False)
self.action_phase = self.input.param("action_phase",
"before_default_load")
self.skip_collections_cleanup = \
self.input.param("skip_collections_cleanup", False)
self.validate_docs_count_during_teardown = \
self.input.param("validate_docs_count_during_teardown", False)
self.batch_size = self.input.param("batch_size", 200)
self.vbuckets = self.input.param("vbuckets", self.cluster_util.vbuckets)
self.crud_batch_size = 100
self.num_nodes_affected = 1
if self.num_replicas > 1:
self.num_nodes_affected = 2
if self.doc_ops:
self.doc_ops = self.doc_ops.split(';')
self.durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
self.durability_level)
# Disable auto-failover to avoid failover of nodes
status = RestConnection(self.cluster.master) \
.update_autofailover_settings(False, 120, False)
self.assertTrue(status, msg="Failure during disabling auto-failover")
self.bucket_helper_obj = BucketHelper(self.cluster.master)
try:
self.collection_setup()
except Java_base_exception as exception:
self.handle_setup_exception(exception)
except Exception as exception:
self.handle_setup_exception(exception)
self.supported_d_levels = \
self.bucket_util.get_supported_durability_levels()
self.log_setup_status("CollectionBase", "complete")
def tearDown(self):
shell = RemoteMachineShellConnection(self.cluster.master)
cbstat_obj = Cbstats(shell)
for bucket in self.bucket_util.buckets:
if bucket.bucketType != Bucket.Type.MEMCACHED:
result = cbstat_obj.all_stats(bucket.name)
self.log.info("Bucket: %s, Active Resident ratio(DGM): %s%%"
% (bucket.name,
result["vb_active_perc_mem_resident"]))
self.log.info("Bucket: %s, Replica Resident ratio(DGM): %s%%"
% (bucket.name,
result["vb_replica_perc_mem_resident"]))
if not self.skip_collections_cleanup \
and bucket.bucketType != Bucket.Type.MEMCACHED:
self.bucket_util.remove_scope_collections_for_bucket(bucket)
shell.disconnect()
if self.validate_docs_count_during_teardown:
self.bucket_util.validate_docs_per_collections_all_buckets()
super(CollectionBase, self).tearDown()
@staticmethod
def create_sdk_clients(num_threads, master,
buckets, sdk_client_pool, sdk_compression):
# Fetch num_collections per bucket. Used for 'req_clients' calc
cols_in_bucket = dict()
for bucket in buckets:
collections_in_bucket = 0
for _, scope in bucket.scopes.items():
for _, _ in scope.collections.items():
collections_in_bucket += 1
cols_in_bucket[bucket.name] = collections_in_bucket
# Create clients in SDK client pool
bucket_count = len(buckets)
max_clients = num_threads
        # Under Jython/Python 2, `/` on ints floors, making ceil() a no-op;
        # force float division so the rounding-up actually happens.
        clients_per_bucket = int(ceil(max_clients / float(bucket_count)))
for bucket in buckets:
sdk_client_pool.create_clients(
bucket=bucket, servers=[master],
req_clients=min(cols_in_bucket[bucket.name],
clients_per_bucket),
compression_settings=sdk_compression)
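    # A hedged worked example (not in the original): with num_threads=10 and
    # two buckets, clients_per_bucket = ceil(10/2.0) = 5; a bucket holding
    # only 3 collections then gets min(3, 5) = 3 clients.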
def collection_setup(self):
self.bucket_util.add_rbac_user()
# Create bucket(s) and add rbac user
if self.bucket_storage == Bucket.StorageBackend.magma:
# get the TTL value
buckets_spec_from_conf = \
self.bucket_util.get_bucket_template_from_package(
self.spec_name)
bucket_ttl = buckets_spec_from_conf.get(Bucket.maxTTL, 0)
            # Blindly override the bucket spec when the backend storage is
            # magma, so the bucket spec in the conf file takes no effect.
self.spec_name = "single_bucket.bucket_for_magma_collections"
magma_bucket_spec = \
self.bucket_util.get_bucket_template_from_package(
self.spec_name)
magma_bucket_spec[Bucket.maxTTL] = bucket_ttl
buckets_spec = magma_bucket_spec
else:
buckets_spec = self.bucket_util.get_bucket_template_from_package(
self.spec_name)
doc_loading_spec = \
self.bucket_util.get_crud_template_from_package(
self.data_spec_name)
# Process params to over_ride values if required
self.over_ride_bucket_template_params(buckets_spec)
self.over_ride_doc_loading_template_params(doc_loading_spec)
self.bucket_util.create_buckets_using_json_data(buckets_spec)
self.bucket_util.wait_for_collection_creation_to_complete()
# Prints bucket stats before doc_ops
self.bucket_util.print_bucket_stats()
# Init sdk_client_pool if not initialized before
if self.sdk_client_pool is None:
self.init_sdk_pool_object()
self.log.info("Creating required SDK clients for client_pool")
self.create_sdk_clients(self.task_manager.number_of_threads,
self.cluster.master,
self.bucket_util.buckets,
self.sdk_client_pool,
self.sdk_compression)
doc_loading_task = \
self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.bucket_util.buckets,
doc_loading_spec,
mutation_num=0,
batch_size=self.batch_size)
if doc_loading_task.result is False:
self.fail("Initial doc_loading failed")
self.cluster_util.print_cluster_stats()
ttl_buckets = [
"multi_bucket.buckets_for_rebalance_tests_with_ttl",
"multi_bucket.buckets_all_membase_for_rebalance_tests_with_ttl",
"volume_templates.buckets_for_volume_tests_with_ttl"]
# Verify initial doc load count
self.bucket_util._wait_for_stats_all_buckets()
if self.spec_name not in ttl_buckets:
self.bucket_util.validate_docs_per_collections_all_buckets()
# Prints bucket stats after doc_ops
self.bucket_util.print_bucket_stats()
def over_ride_bucket_template_params(self, bucket_spec):
if self.bucket_storage == Bucket.StorageBackend.magma:
# Blindly override the following params
bucket_spec[Bucket.evictionPolicy] = \
Bucket.EvictionPolicy.FULL_EVICTION
else:
for key, val in self.input.test_params.items():
if key == "replicas":
bucket_spec[Bucket.replicaNumber] = self.num_replicas
elif key == "bucket_size":
bucket_spec[Bucket.ramQuotaMB] = self.bucket_size
elif key == "num_items":
bucket_spec[MetaConstants.NUM_ITEMS_PER_COLLECTION] = \
self.num_items
elif key == "remove_default_collection":
bucket_spec[MetaConstants.REMOVE_DEFAULT_COLLECTION] = \
self.input.param(key)
elif key == "bucket_storage":
bucket_spec[Bucket.storageBackend] = self.bucket_storage
elif key == "compression_mode":
bucket_spec[Bucket.compressionMode] = self.compression_mode
elif key == "flushEnabled":
bucket_spec[Bucket.flushEnabled] = int(self.flush_enabled)
elif key == "bucket_type":
bucket_spec[Bucket.bucketType] = self.bucket_type
def over_ride_doc_loading_template_params(self, target_spec):
for key, value in self.input.test_params.items():
if key == "durability":
target_spec[MetaCrudParams.DURABILITY_LEVEL] = \
self.durability_level
elif key == "sdk_timeout":
target_spec[MetaCrudParams.SDK_TIMEOUT] = self.sdk_timeout
elif key == "doc_size":
target_spec[MetaCrudParams.DocCrud.DOC_SIZE] = self.doc_size
def load_data_for_sub_doc_ops(self, verification_dict=None):
new_data_load_template = \
self.bucket_util.get_crud_template_from_package("initial_load")
new_data_load_template[MetaCrudParams.DURABILITY_LEVEL] = ""
new_data_load_template["doc_crud"][
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100
new_data_load_template["subdoc_crud"][
MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 50
doc_loading_task = \
self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.bucket_util.buckets,
new_data_load_template,
mutation_num=0,
batch_size=self.batch_size)
if doc_loading_task.result is False:
self.fail("Extra doc loading task failed")
if verification_dict:
self.update_verification_dict_from_collection_task(
verification_dict,
doc_loading_task)
def update_verification_dict_from_collection_task(self,
verification_dict,
doc_loading_task):
for bucket, s_dict in doc_loading_task.loader_spec.items():
for s_name, c_dict in s_dict["scopes"].items():
for c_name, _ in c_dict["collections"].items():
c_crud_data = doc_loading_task.loader_spec[
bucket]["scopes"][
s_name]["collections"][c_name]
for op_type in c_crud_data.keys():
total_mutation = \
c_crud_data[op_type]["doc_gen"].end \
- c_crud_data[op_type]["doc_gen"].start
if op_type in DocLoading.Bucket.DOC_OPS:
verification_dict["ops_%s" % op_type] \
+= total_mutation
elif op_type in DocLoading.Bucket.SUB_DOC_OPS:
verification_dict["ops_update"] \
+= total_mutation
if c_crud_data[op_type]["durability_level"] \
in self.supported_d_levels:
verification_dict["sync_write_committed_count"] \
+= total_mutation
def validate_cruds_from_collection_mutation(self, doc_loading_task):
# Read all the values to validate the CRUDs
for bucket, s_dict in doc_loading_task.loader_spec.items():
client = self.sdk_client_pool.get_client_for_bucket(bucket)
for s_name, c_dict in s_dict["scopes"].items():
for c_name, _ in c_dict["collections"].items():
c_crud_data = doc_loading_task.loader_spec[
bucket]["scopes"][
s_name]["collections"][c_name]
client.select_collection(s_name, c_name)
for op_type in c_crud_data.keys():
doc_gen = c_crud_data[op_type]["doc_gen"]
is_sub_doc = False
if op_type in DocLoading.Bucket.SUB_DOC_OPS:
is_sub_doc = True
task = self.task.async_validate_docs(
self.cluster, bucket,
doc_gen, op_type,
scope=s_name, collection=c_name,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
sdk_client_pool=self.sdk_client_pool,
is_sub_doc=is_sub_doc)
self.task_manager.get_task_result(task)
| 47.220339
| 80
| 0.603733
|
794a413487332e25d9e4671a3f84cf70ed55b5ae
| 740
|
py
|
Python
|
us2/python/fehler.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 2
|
2019-12-10T10:25:11.000Z
|
2021-01-26T13:59:40.000Z
|
us1/python/fehler.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | null | null | null |
us1/python/fehler.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 1
|
2020-12-06T21:24:58.000Z
|
2020-12-06T21:24:58.000Z
|
import sympy
import numpy as np
def error(f, err_vars=None):
from sympy import Symbol, latex
s = 0
latex_names = dict()
    if err_vars is None:
err_vars = f.free_symbols
for v in err_vars:
err = Symbol('latex_std_' + v.name)
s += f.diff(v)**2 * err**2
latex_names[err] = '\\sigma_{' + latex(v) + '}'
return latex(sympy.sqrt(s), symbol_names=latex_names)
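# Gaussian error propagation (what error() implements): for f(x_1, ..., x_n)
# the propagated uncertainty is sigma_f = sqrt(sum_i (df/dx_i)^2 * sigma_i^2).
# For u = R*I_1 below this yields sqrt(I_1**2*sigma_R**2 + R**2*sigma_I_1**2).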
# Define the variables that carry errors, separated by commas; the TeX name goes in parentheses
r, i = sympy.var('R I_1')
# Specify the formula
u = r * i
print(u)
print(error(u))
print()
# Text file, because otherwise I don't know whether make can handle it
r = np.linspace(0,1)
# np.savetxt('build/fehler.txt', (r))
np.savetxt('build/fehler.txt', r)
| 23.125
| 90
| 0.643243
|
794a41c1e6e41ffe8181654bbc24992600fae848
| 288
|
py
|
Python
|
lfluxproject/lstory/forms.py
|
lutoma/lflux
|
5cb51d4dfda8caf7be3bb621bcb991bc175f5e38
|
[
"MIT"
] | null | null | null |
lfluxproject/lstory/forms.py
|
lutoma/lflux
|
5cb51d4dfda8caf7be3bb621bcb991bc175f5e38
|
[
"MIT"
] | null | null | null |
lfluxproject/lstory/forms.py
|
lutoma/lflux
|
5cb51d4dfda8caf7be3bb621bcb991bc175f5e38
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import StorySummary
from django.utils.translation import ugettext_lazy as _
class StorySummaryForm(forms.Form):
    body = forms.CharField(widget=forms.Textarea, help_text=_('markdown-formatted summary text consisting of 2 or 3 list items only!'))
| 41.142857
| 136
| 0.805556
|
794a4310d74335ee35ea8dde2ec424fb8798cda9
| 3,852
|
py
|
Python
|
qa/rpc-tests/test_framework/blocktools.py
|
modong/qtum
|
e2d7f5e7b588443ac10ac31f7af18527e54abcb5
|
[
"MIT"
] | 2
|
2017-07-31T14:18:36.000Z
|
2021-07-19T21:35:56.000Z
|
qa/rpc-tests/test_framework/blocktools.py
|
yelongbao/qtum
|
e2d7f5e7b588443ac10ac31f7af18527e54abcb5
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/blocktools.py
|
yelongbao/qtum
|
e2d7f5e7b588443ac10ac31f7af18527e54abcb5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# blocktools.py - utilities for manipulating blocks and transactions
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+POW_TARGET_SPACING)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
# According to BIP141, blocks with witness rules active must commit to the
# hash of all in-block transactions including witness.
def add_witness_commitment(block, nonce=0):
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
block.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, output_data])))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
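# A few illustrative values (not in the original file) for the minimal
# CScriptNum encoding above:
#   serialize_script_num(1)   -> b'\x01'
#   serialize_script_num(-1)  -> b'\x81'      (sign bit set on the last byte)
#   serialize_script_num(128) -> b'\x80\x00'  (padding byte keeps it positive)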
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
CScript() + height + b"\x00", 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = int(INITIAL_BLOCK_REWARD) * COIN
#halvings = int(height) # regtest
#coinbaseoutput.nValue >>= halvings
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
| 36.339623
| 104
| 0.705867
|
794a43cede21670f88a796af1e4b25eb996289bc
| 4,466
|
py
|
Python
|
netbox/tenancy/models.py
|
paxio/netbox
|
55dbbdc4a59f8c1efb87d3d86cef828fd8492aeb
|
[
"Apache-2.0"
] | 1
|
2018-11-07T21:52:41.000Z
|
2018-11-07T21:52:41.000Z
|
netbox/tenancy/models.py
|
paxio/netbox
|
55dbbdc4a59f8c1efb87d3d86cef828fd8492aeb
|
[
"Apache-2.0"
] | 2
|
2018-02-13T11:58:07.000Z
|
2018-03-07T10:45:44.000Z
|
netbox/tenancy/models.py
|
paxio/netbox
|
55dbbdc4a59f8c1efb87d3d86cef828fd8492aeb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from taggit.managers import TaggableManager
from extras.models import CustomFieldModel
from utilities.models import ChangeLoggedModel
from .constants import *
@python_2_unicode_compatible
class TenantGroup(ChangeLoggedModel):
"""
An arbitrary collection of Tenants.
"""
name = models.CharField(
max_length=50,
unique=True
)
slug = models.SlugField(
unique=True
)
csv_headers = ['name', 'slug']
class Meta:
ordering = ['name']
verbose_name = 'Service Provider'
verbose_name_plural = 'Service Providers'
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?group={}".format(reverse('tenancy:tenant_list'), self.slug)
def to_csv(self):
return (
self.name,
self.slug,
)
@python_2_unicode_compatible
class Tenant(ChangeLoggedModel, CustomFieldModel):
"""
A Tenant represents an organization served by the NetBox owner. This is typically a customer or an internal
department.
"""
name = models.CharField(
max_length=30,
unique=True
)
slug = models.SlugField(
unique=True
)
group = models.ForeignKey(
to='tenancy.TenantGroup',
on_delete=models.SET_NULL,
related_name='tenants',
blank=True,
null=True
)
description = models.CharField(
max_length=100,
blank=True,
help_text='Long-form name (optional)'
)
comments = models.TextField(
blank=True
)
custom_field_values = GenericRelation(
to='extras.CustomFieldValue',
content_type_field='obj_type',
object_id_field='obj_id'
)
tags = TaggableManager()
csv_headers = ['name', 'slug', 'group', 'description', 'comments']
class Meta:
ordering = ['group', 'name']
verbose_name = 'Customer'
verbose_name_plural = 'Customers'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:tenant', args=[self.slug])
def to_csv(self):
return (
self.name,
self.slug,
self.group.name if self.group else None,
self.description,
self.comments,
)
@python_2_unicode_compatible
class Package(ChangeLoggedModel, CustomFieldModel):
"""
A Package represents a service delivered to our customers.
"""
name = models.CharField(max_length=30, unique=True)
slug = models.SlugField(unique=True)
    ipv4_enabled = models.BooleanField(blank=False, default=True, verbose_name='IPv4 is enabled', help_text='Customers receive an IPv4 address')
    ipv6_enabled = models.BooleanField(blank=False, default=True, verbose_name='IPv6 is enabled', help_text='Customers receive an IPv6 address')
multicast_enabled = models.BooleanField(blank=False, default=True, verbose_name='Multicast is enabled', help_text='Customers can use multicast')
speed_upload = models.PositiveIntegerField(blank=False, null=False, verbose_name='Upload speed rate (Kbps)')
speed_download = models.PositiveIntegerField(blank=False, null=False, verbose_name='Download speed rate (Kbps)')
qos_profile = models.CharField(max_length=30, unique=False)
comments = models.TextField(
blank=True
)
custom_field_values = GenericRelation(
to='extras.CustomFieldValue',
content_type_field='obj_type',
object_id_field='obj_id'
)
tags = TaggableManager()
csv_headers = ['name', 'slug', 'ipv4_enabled', 'ipv6_enabled', 'multicast_enabled', 'speed_upload', 'speed_download', 'qos_profile']
class Meta:
ordering = ['name']
verbose_name = 'Package'
verbose_name_plural = 'Packages'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:package', args=[self.slug])
def to_csv(self):
return (
self.name,
self.slug,
self.ipv4_enabled,
self.ipv6_enabled,
self.multicast_enabled,
self.speed_upload,
self.speed_download,
self.qos_profile,
)
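# Minimal usage sketch (hypothetical values; assumes a configured Django
# environment with migrations applied):
#
#     group = TenantGroup.objects.create(name='Residential', slug='residential')
#     tenant = Tenant.objects.create(name='Acme', slug='acme', group=group)
#     tenant.to_csv()  # -> ('Acme', 'acme', 'Residential', '', '')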
| 28.628205
| 148
| 0.654277
|
794a43f716b0060e7469210bd74d8fe0deead749
| 1,471
|
py
|
Python
|
pylark/api_service_drive_sheet_protected_dimension_delete.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | 7
|
2021-08-18T00:42:05.000Z
|
2022-03-14T09:49:15.000Z
|
pylark/api_service_drive_sheet_protected_dimension_delete.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | null | null | null |
pylark/api_service_drive_sheet_protected_dimension_delete.py
|
chyroc/pylark
|
a54cce6b814935fd3c72668b262b54c8ee461484
|
[
"Apache-2.0"
] | 1
|
2022-03-14T09:49:20.000Z
|
2022-03-14T09:49:20.000Z
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class DeleteSheetProtectedDimensionReq(object):
spreadsheet_token: str = attr.ib(
default="", metadata={"req_type": "path", "key": "spreadsheetToken"}
    ) # token of the sheet; see the [spreadsheet development guide](https://open.feishu.cn/document/ukTMukTMukTM/uATMzUjLwEzM14CMxMTN/overview) for how to obtain it
protect_ids: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "protectIds"}
    ) # IDs of the protected ranges to delete; can be obtained via the [get spreadsheet metadata](https://open.feishu.cn/document/ukTMukTMukTM/uETMzUjLxEzM14SMxMTN) API
@attr.s
class DeleteSheetProtectedDimensionResp(object):
del_protect_ids: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "delProtectIds"}
    ) # IDs of the protected ranges that were successfully deleted
def _gen_delete_sheet_protected_dimension_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=DeleteSheetProtectedDimensionResp,
scope="Drive",
api="DeleteSheetProtectedDimension",
method="DELETE",
url="https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/:spreadsheetToken/protected_range_batch_del",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
need_user_access_token=True,
)
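# Minimal construction sketch (the token and IDs below are placeholders; the
# request is normally built and dispatched by the pylark client):
#
#     req = DeleteSheetProtectedDimensionReq(
#         spreadsheet_token="shtcnXXXXXXXX",
#         protect_ids=["protectId1"],
#     )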
| 37.717949
| 114
| 0.728756
|
794a45321e9d7243d64bcec36e3fe39374e15842
| 560
|
py
|
Python
|
front/migrations/0002_auto_20201201_1631.py
|
jimixjay/acestats
|
015a26e084fda70ab5754b78ce2e5157fee29d10
|
[
"Apache-2.0"
] | null | null | null |
front/migrations/0002_auto_20201201_1631.py
|
jimixjay/acestats
|
015a26e084fda70ab5754b78ce2e5157fee29d10
|
[
"Apache-2.0"
] | null | null | null |
front/migrations/0002_auto_20201201_1631.py
|
jimixjay/acestats
|
015a26e084fda70ab5754b78ce2e5157fee29d10
|
[
"Apache-2.0"
] | 1
|
2021-01-15T19:56:41.000Z
|
2021-01-15T19:56:41.000Z
|
# Generated by Django 3.1.2 on 2020-12-01 15:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='MatchStats',
new_name='Match_Stats',
),
        migrations.RenameModel(
            old_name='TourneyLevel',
            new_name='Tourney_Level',
        ),
        migrations.RenameModel(
            old_name='PlayerEntry',
            new_name='Player_Entry',
        ),
]
| 21.538462
| 47
| 0.560714
|
794a45e1be8eb8b5a93a3957c328fcacab624832
| 458
|
py
|
Python
|
feature_engineering/utils/preprocessing.py
|
ThorbenJensen/feature-engineering
|
a5f73b29289dd982ab89ea5080186b833c362cfa
|
[
"MIT"
] | 15
|
2019-10-09T08:12:32.000Z
|
2021-01-11T08:20:55.000Z
|
feature_engineering/utils/preprocessing.py
|
ThorbenJensen/feature-engineering
|
a5f73b29289dd982ab89ea5080186b833c362cfa
|
[
"MIT"
] | null | null | null |
feature_engineering/utils/preprocessing.py
|
ThorbenJensen/feature-engineering
|
a5f73b29289dd982ab89ea5080186b833c362cfa
|
[
"MIT"
] | 2
|
2021-03-17T17:30:00.000Z
|
2021-04-07T07:36:21.000Z
|
from datetime import datetime
import pandas as pd
def time_features(dt_series: pd.Series) -> pd.DataFrame:
df = pd.DataFrame({
'year': dt_series.dt.year,
'month': dt_series.dt.month,
'week': dt_series.dt.week,
'weekday': dt_series.dt.weekday,
'hour': dt_series.dt.hour,
})
df_dummies = pd.get_dummies(df, prefix='weekday', columns=['weekday'])
return df_dummies
def parse_date(x):
    # pd.datetime was deprecated and later removed; use the stdlib datetime instead
    return datetime.strptime(x, '%Y-%m-%d')
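# Minimal usage sketch:
#
#     dates = pd.Series(pd.to_datetime(["2021-03-01 08:00", "2021-03-06 17:30"]))
#     time_features(dates)  # -> year/month/week/hour columns plus weekday_* dummies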
| 25.444444
| 74
| 0.624454
|
794a46d8087238d60bbc4db61ca0e8b5f19b3d05
| 1,560
|
py
|
Python
|
Example5.py
|
cpgoncalves/gameplayer
|
c53c5163bdc00e06c51e2b3532e3e4df6eb96cf5
|
[
"MIT"
] | 1
|
2015-12-28T13:09:03.000Z
|
2015-12-28T13:09:03.000Z
|
Example5.py
|
cpgoncalves/gameplayer
|
c53c5163bdc00e06c51e2b3532e3e4df6eb96cf5
|
[
"MIT"
] | null | null | null |
Example5.py
|
cpgoncalves/gameplayer
|
c53c5163bdc00e06c51e2b3532e3e4df6eb96cf5
|
[
"MIT"
] | null | null | null |
# Carlos Pedro Gonçalves (2015), Game Theory with Python
# Game Theory and Applied A.I. Classes
# Instituto Superior de Ciências Sociais e Políticas (ISCSP)
# University of Lisbon
# cgoncalves@iscsp.ulisboa.pt
#
# New Entrant vs Market Leader (payoffs correspond to strategic value)
#
# For more details see the user manual that comes with the package:
# Gonçalves, C.P. (2015) "Game Player User Manual - A Game Theory Analyzer With Python",
# https://sites.google.com/site/autonomouscomputingsystems/game-player
import gamep # import the game player main module
tree = [] # setup the game tree
# design the tree in accordance with the problem:
# the "No move" is added at a given level whenever the player has no alternative choice
# this allows us to deal with a tree with different branch lengths
gamep.createPath(["Enter","Propose partnership","Accept partnership","No move"], [5,3],tree)
gamep.createPath(["Enter","Propose partnership","Reject partnership","Fight"], [-2,3.5],tree)
gamep.createPath(["Enter","Propose partnership","Reject partnership","Do not fight"], [4,2],tree)
gamep.createPath(["Enter","Do not propose partnership","Fight","No move"], [-1,3],tree)
gamep.createPath(["Enter","Do not propose partnership","Do not fight","No move"], [4,2],tree)
gamep.createPath(["Do not enter","No move","No move","No move"],[0,5],tree)
gamep.showTree(tree)
# play sequence: the New Entrant plays in the first two levels, then the
# Market Leader plays in the next two levels
plays = [0,0,1,1]
gamep.evaluateTree(tree,plays) # evaluate the game tree
| 47.272727
| 97
| 0.740385
|
794a46dc123663c191e7028e7938dccacc1f8175
| 5,760
|
py
|
Python
|
moha/posthf/pt/mp3.py
|
ZhaoYilin/moha
|
d701fd921839474380982db1478e66f0dc8cbd98
|
[
"MIT"
] | 12
|
2019-12-07T18:37:34.000Z
|
2022-03-30T14:23:38.000Z
|
moha/posthf/pt/mp3.py
|
ZhaoYilin/moha
|
d701fd921839474380982db1478e66f0dc8cbd98
|
[
"MIT"
] | null | null | null |
moha/posthf/pt/mp3.py
|
ZhaoYilin/moha
|
d701fd921839474380982db1478e66f0dc8cbd98
|
[
"MIT"
] | 2
|
2019-12-08T05:48:47.000Z
|
2021-10-31T21:40:21.000Z
|
from moha.system.hamiltonian.chemical_hamiltonian import *
from moha.posthf.pt.auxiliary import *
from moha.io.log import log, timer
import numpy as np
import copy
__all__ = ['MP3Solver']
class MP3Solver(object):
"""Third-order Moller-Plesset perturbation solver.
Attributes
----------
ham
Chemical Hamiltonian.
wfn
Hartree Fock wavefunction.
hf_results : dict
Hartree Fock calculation results.
Methods
-------
__init__(self,ham,wfn,hf_results)
Initialize the solver.
kernel(self)
Kernel of the solver.
assign_hamiltonian(self,ham)
Assign the chemical Hamiltonian to the solver.
assign_wavefunction(self,wfn)
Assign the Hartree Fock wavefunction to the solver.
assign_hartree_fock_results(self,hf_results)
Assign the Hartree Fock calculation results to the solver.
"""
def __init__(self,ham,wfn,hf_results):
"""Initialize the solver.
Attributes
----------
ham
Chemical Hamiltonian.
wfn
Hartree Fock wavefunction.
hf_results : dict
Hartree Fock calculation results.
"""
self.assign_hamiltonian(ham)
self.assign_wavefunction(wfn)
self.assign_hartree_fock_results(hf_results)
@timer.with_section("MP3")
def kernel(self):
"""Kernel of the solver.
Returns
-------
results : dict
MP3 calculation results.
"""
log.hline()
log('MP3 Calculation Section'.format())
log.hline()
ham = copy.deepcopy(self.ham)
wfn = copy.deepcopy(self.wfn)
hf_results = self.hf_results
nspatial = ham.nspatial
occ = wfn.occ
C = wfn.coefficients
eorbitals = hf_results['orbital_energies']
Emp2 = 0.0
ham.operators['electron_repulsion'].basis_transformation(C)
Eri = ham.operators['electron_repulsion'].integral
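        # Closed-shell, spin-adapted MP2 energy over spatial MO integrals
        # (standard expression, noted here for clarity):
        #   E_MP2 = sum_{ijab} (ia|jb) * [2(ia|jb) - (ib|ja)] / (e_i + e_j - e_a - e_b)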
for i in range(occ['alpha']):
for j in range(occ['alpha']):
for a in range(occ['alpha'],nspatial):
for b in range(occ['alpha'],nspatial):
Emp2 += Eri[i,a,j,b]*(2*Eri[i,a,j,b]-Eri[i,b,j,a])\
/(eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])
Emp3 = 0.0
Eri = ham.operators['electron_repulsion'].double_bar
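        # The three nested loops below evaluate the standard spin-orbital MP3
        # terms (see e.g. Szabo & Ostlund, eq. 6.75): hole-hole,
        # particle-particle, and ring (hole-particle) contractions of the
        # antisymmetrized integrals.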
for i in range(occ['alpha']):
for j in range(occ['alpha']):
for k in range(occ['alpha']):
for l in range(occ['alpha']):
for a in range(occ['alpha'],nspatial):
for b in range(occ['alpha'],nspatial):
Emp3 += (1/8.0)*Eri[i,j,a,b]*Eri[k,l,i,j]*Eri[a,b,k,l]\
/((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\
*(eorbitals[k] + eorbitals[l] -eorbitals[a] - eorbitals[b]))
for i in range(occ['alpha']):
for j in range(occ['alpha']):
for a in range(occ['alpha'],nspatial):
for b in range(occ['alpha'],nspatial):
for c in range(occ['alpha'],nspatial):
for d in range(occ['alpha'],nspatial):
Emp3 += (1/8.0)*Eri[i,j,a,b]*Eri[a,b,c,d]*Eri[c,d,i,j]\
/((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\
*(eorbitals[i] + eorbitals[j] -eorbitals[c] - eorbitals[d]))
for i in range(occ['alpha']):
for j in range(occ['alpha']):
for k in range(occ['alpha']):
for a in range(occ['alpha'],nspatial):
for b in range(occ['alpha'],nspatial):
for c in range(occ['alpha'],nspatial):
Emp3 += Eri[i,j,a,b]*Eri[k,b,c,j]*Eri[a,c,i,k]\
/((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\
                                *(eorbitals[i] + eorbitals[k] -eorbitals[a] - eorbitals[c]))
log.hline()
log('MP3 Results'.format())
log.hline()
log('{0:2s} {1:3f}'.format('Escf', hf_results['total_energy']))
log('{0:2s} {1:3f}'.format('Emp2', Emp2))
log('{0:2s} {1:3f}'.format('Emp3', Emp3))
log('{0:2s} {1:3f}'.format('Etot', hf_results['total_energy']+Emp2+Emp3))
log.hline()
results = {
"success": True,
"MP2_energy":Emp2,
"MP3_energy":Emp3,
"total_energy":hf_results['total_energy']+Emp2+Emp3
}
return results
def assign_hamiltonian(self,ham):
"""Assign the chemical Hamiltonian to the solver.
Attributes
----------
ham
Chemical Hamiltonian.
"""
self.ham = ham
def assign_wavefunction(self,wfn):
"""Assign the Hartree Fock wavefunction to the solver.
Attributes
----------
wfn
Hartree Fock wavefunction.
"""
self.wfn = wfn
def assign_hartree_fock_results(self,hf_results):
"""Assign the Hartree Fock calculation results to the solver.
Attributes
----------
hf_results : dict
Hartree Fock calculation results.
Raises
------
TypeError
If Hartree Fock calculation results is not a dictionary.
"""
if not isinstance(hf_results, dict):
raise TypeError("Hartree Fock calculation results must be a dictionary")
self.hf_results = hf_results
| 32.914286
| 93
| 0.522396
|
794a4743d44c3713b8e763be94c58817de2696d9
| 4,984
|
py
|
Python
|
mkdocs/commands/serve.py
|
UnsolvedCypher/mkdocs
|
eb31d4c0d70259755b779bd6bf34609ac2adca7b
|
[
"BSD-2-Clause"
] | null | null | null |
mkdocs/commands/serve.py
|
UnsolvedCypher/mkdocs
|
eb31d4c0d70259755b779bd6bf34609ac2adca7b
|
[
"BSD-2-Clause"
] | null | null | null |
mkdocs/commands/serve.py
|
UnsolvedCypher/mkdocs
|
eb31d4c0d70259755b779bd6bf34609ac2adca7b
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import shutil
import tempfile
import sys
from os.path import isfile, join
from mkdocs.commands.build import build
from mkdocs.config import load_config
log = logging.getLogger(__name__)
def _init_asyncio_patch():
"""
Select compatible event loop for Tornado 5+.
As of Python 3.8, the default event loop on Windows is `proactor`,
however Tornado requires the old default "selector" event loop.
As Tornado has decided to leave this to users to set, MkDocs needs
to set it. See https://github.com/tornadoweb/tornado/issues/2608.
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import WindowsSelectorEventLoopPolicy
except ImportError:
pass # Can't assign a policy which doesn't exist.
else:
if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def _get_handler(site_dir, StaticFileHandler):
from tornado.template import Loader
class WebHandler(StaticFileHandler):
def write_error(self, status_code, **kwargs):
if status_code in (404, 500):
error_page = '{}.html'.format(status_code)
if isfile(join(site_dir, error_page)):
self.write(Loader(site_dir).load(error_page).generate())
else:
super().write_error(status_code, **kwargs)
return WebHandler
def _livereload(host, port, config, builder, site_dir, watch_theme):
# We are importing here for anyone that has issues with livereload. Even if
# this fails, the --no-livereload alternative should still work.
_init_asyncio_patch()
from livereload import Server
import livereload.handlers
class LiveReloadServer(Server):
def get_web_handlers(self, script):
handlers = super().get_web_handlers(script)
# replace livereload handler
return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]
server = LiveReloadServer()
# Watch the documentation files, the config file and the theme files.
server.watch(config['docs_dir'], builder)
server.watch(config['config_file_path'], builder)
if watch_theme:
for d in config['theme'].dirs:
server.watch(d, builder)
# Run `serve` plugin events.
server = config['plugins'].run_event('serve', server, config=config, builder=builder)
server.serve(root=site_dir, host=host, port=port, restart_delay=0)
def _static_server(host, port, site_dir):
# Importing here to separate the code paths from the --livereload
# alternative.
_init_asyncio_patch()
from tornado import ioloop
from tornado import web
application = web.Application([
(r"/(.*)", _get_handler(site_dir, web.StaticFileHandler), {
"path": site_dir,
"default_filename": "index.html"
}),
])
application.listen(port=port, address=host)
log.info('Running at: http://%s:%s/', host, port)
log.info('Hold ctrl+c to quit.')
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
log.info('Stopping server...')
def serve(config_file=None, dev_addr=None, strict=None, theme=None,
theme_dir=None, livereload='livereload', watch_theme=False, **kwargs):
"""
Start the MkDocs development server
By default it will serve the documentation on http://localhost:8000/ and
it will rebuild the documentation and refresh the page automatically
whenever a file is edited.
"""
    # Create a temporary build directory, and set some options to serve it.
    # The 'mkdocs_' prefix makes MkDocs temp dirs easier to identify.
site_dir = tempfile.mkdtemp(prefix='mkdocs_')
def builder():
log.info("Building documentation...")
config = load_config(
config_file=config_file,
dev_addr=dev_addr,
strict=strict,
theme=theme,
theme_dir=theme_dir,
site_dir=site_dir,
**kwargs
)
# Override a few config settings after validation
config['site_url'] = 'http://{}/'.format(config['dev_addr'])
live_server = livereload in ['dirty', 'livereload']
dirty = livereload == 'dirty'
build(config, live_server=live_server, dirty=dirty)
return config
try:
# Perform the initial build
config = builder()
host, port = config['dev_addr']
if livereload in ['livereload', 'dirty']:
_livereload(host, port, config, builder, site_dir, watch_theme)
else:
_static_server(host, port, site_dir)
finally:
shutil.rmtree(site_dir)
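# Minimal usage sketch (assumes an mkdocs.yml in the current working directory):
#
#     from mkdocs.commands.serve import serve
#     serve(config_file='mkdocs.yml', livereload='livereload')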
| 32.789474
| 117
| 0.656701
|
794a4827b0c5c9313acefd3a365a2dc083876c64
| 4,710
|
py
|
Python
|
mars/tensor/random/uniform.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/random/uniform.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/random/uniform.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialization.serializables import AnyField
from ..utils import gen_random_seeds
from .core import TensorRandomOperandMixin, handle_array, TensorDistribution
class TensorUniform(TensorDistribution, TensorRandomOperandMixin):
_input_fields_ = ["low", "high"]
_op_type_ = OperandDef.RAND_UNIFORM
_fields_ = "low", "high", "size"
low = AnyField("low")
high = AnyField("high")
_func_name = "uniform"
def __call__(self, low, high, chunk_size=None):
return self.new_tensor([low, high], None, raw_chunk_size=chunk_size)
def uniform(
random_state, low=0.0, high=1.0, size=None, chunk_size=None, gpu=None, dtype=None
):
r"""
Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float or array_like of floats, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or array_like of floats
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``low`` and ``high`` are both scalars.
Otherwise, ``mt.broadcast(low, high).size`` samples are drawn.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
gpu : bool, optional
Allocate the tensor on GPU if True, False as default
dtype : data-type, optional
Data-type of the returned tensor.
Returns
-------
out : Tensor or scalar
Drawn samples from the parameterized uniform distribution.
See Also
--------
randint : Discrete uniform distribution, yielding integers.
random_integers : Discrete uniform distribution over the closed
interval ``[low, high]``.
random_sample : Floats uniformly distributed over ``[0, 1)``.
random : Alias for `random_sample`.
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
Notes
-----
The probability density function of the uniform distribution is
.. math:: p(x) = \frac{1}{b - a}
anywhere within the interval ``[a, b)``, and zero elsewhere.
When ``high`` == ``low``, values of ``low`` will be returned.
If ``high`` < ``low``, the results are officially undefined
and may eventually raise an error, i.e. do not rely on this
function to behave when passed arguments satisfying that
inequality condition.
Examples
--------
Draw samples from the distribution:
>>> import mars.tensor as mt
>>> s = mt.random.uniform(-1,0,1000)
All values are within the given interval:
>>> mt.all(s >= -1).execute()
True
>>> mt.all(s < 0).execute()
True
Display the histogram of the samples, along with the
probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s.execute(), 15, normed=True)
>>> plt.plot(bins, mt.ones_like(bins).execute(), linewidth=2, color='r')
>>> plt.show()
"""
if dtype is None:
dtype = (
np.random.RandomState()
.uniform(handle_array(low), handle_array(high), size=(0,))
.dtype
)
size = random_state._handle_size(size)
seed = gen_random_seeds(1, random_state.to_numpy())[0]
op = TensorUniform(size=size, seed=seed, gpu=gpu, dtype=dtype)
return op(low, high, chunk_size=chunk_size)
| 35.681818
| 85
| 0.663057
|
794a48d05c208a9725ec7da9be6a381ac95c7fd3
| 2,072
|
py
|
Python
|
eve_sqlalchemy/structures.py
|
gllmbernard/eve-sqlalchemy
|
48a58efdb335bd881e6e67f3b01e62c8443e90af
|
[
"BSD-3-Clause"
] | 126
|
2017-03-09T07:29:32.000Z
|
2022-02-08T07:56:07.000Z
|
eve_sqlalchemy/structures.py
|
gllmbernard/eve-sqlalchemy
|
48a58efdb335bd881e6e67f3b01e62c8443e90af
|
[
"BSD-3-Clause"
] | 114
|
2015-01-09T15:19:48.000Z
|
2017-03-08T13:36:17.000Z
|
eve_sqlalchemy/structures.py
|
gllmbernard/eve-sqlalchemy
|
48a58efdb335bd881e6e67f3b01e62c8443e90af
|
[
"BSD-3-Clause"
] | 55
|
2017-03-16T11:12:44.000Z
|
2021-12-28T00:19:03.000Z
|
# -*- coding: utf-8 -*-
"""
These classes provide a middle layer to transform a SQLAlchemy query into
a series of objects that Eve understands and can render as JSON.
:copyright: (c) 2013 by Andrew Mleczko and Tomasz Jezierski (Tefnet)
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
from .utils import sqla_object_to_dict
class SQLAResultCollection(object):
"""
Collection of results. The object holds onto a Flask-SQLAlchemy query
object and serves a generator off it.
:param query: Base SQLAlchemy query object for the requested resource
:param fields: fields to be rendered in the response, as a list of strings
:param spec: filter to be applied to the query
:param sort: sorting requirements
:param max_results: number of entries to be returned per page
:param page: page requested
"""
def __init__(self, query, fields, **kwargs):
self._query = query
self._fields = fields
self._spec = kwargs.get('spec')
self._sort = kwargs.get('sort')
self._max_results = kwargs.get('max_results')
self._page = kwargs.get('page')
self._resource = kwargs.get('resource')
if self._spec:
self._query = self._query.filter(*self._spec)
if self._sort:
for (order_by, joins) in self._sort:
self._query = self._query.filter(*joins).order_by(order_by)
        # save the count of items to an internal variable before applying the
        # limit to the query, since the limit would skew the count it returns
self._count = self._query.count()
if self._max_results:
self._query = self._query.limit(self._max_results)
if self._page:
self._query = self._query.offset((self._page - 1) *
self._max_results)
def __iter__(self):
for i in self._query:
yield sqla_object_to_dict(i, self._fields)
def count(self, **kwargs):
return self._count
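# Minimal usage sketch (hypothetical query and fields; assumes a configured
# Flask-SQLAlchemy session):
#
#     collection = SQLAResultCollection(query, ['id', 'name'],
#                                       max_results=25, page=2)
#     total = collection.count()
#     items = list(collection)  # each item rendered via sqla_object_to_dict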
| 37
| 78
| 0.643822
|
794a48d80b9373f10e21af0300b5377a017308f3
| 4,126
|
py
|
Python
|
src/apps/metas/forms.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | null | null | null |
src/apps/metas/forms.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | 48
|
2017-04-21T17:35:23.000Z
|
2020-08-29T04:19:35.000Z
|
src/apps/metas/forms.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# app: metas
# module: forms
# date: Wednesday, May 23, 2018 - 10:22
# description: Forms for the SPEN Goals
# pylint: disable=W0613,R0201,R0903
from crispy_forms.layout import Layout, Submit, Div, Field
from crispy_forms.helper import FormHelper
from django import forms
from apps.metas.models import Proof, Goal, Role, Site, Member
class ProofForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProofForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Div(
Field('member', wrapper_class='col-md-4'),
Field('goal', wrapper_class='col-md-2'),
Field('date', wrapper_class='col-md-2'),
css_class='row'
)
)
self.helper.add_input(Submit('submit', 'Enviar'))
class Meta:
model = Proof
exclude = ['fields', ]
class AddSiteForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddSiteForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Div(
Field('site', wrapper_class='col-md-2 col-sm-4'),
Field('name', wrapper_class='col-md-5'),
Field('address', wrapper_class='col-md-7'),
css_class='row'
)
)
self.helper.add_input(Submit('submit', 'Enviar'))
class Meta:
model = Site
fields = '__all__'
class AddRolForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddRolForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Div(
Field('clave', wrapper_class='col-md-2 col-sm-4'),
Field('order', wrapper_class='col-md-2 col-sm-3'),
css_class='row'
),
Div(
Field('description', wrapper_class='col-md-6'),
css_class='row'
)
)
self.helper.add_input(Submit('submit', 'Enviar'))
class Meta:
model = Role
fields = '__all__'
class AddMemberForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddMemberForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Div(
Field('name', wrapper_class='col-md-7'),
css_class='row'
),
Div(
Field('mail', wrapper_class='col-md-6'),
css_class='row'
),
Div(
Div(Field('role'), css_class='col-md-4'),
Div(Field('site'), css_class='col-md-4'),
css_class='row'
)
)
self.helper.add_input(Submit('submit', 'Enviar'))
class Meta:
model = Member
fields = '__all__'
class AddGoalForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddGoalForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Div(
Field('role', wrapper_class='col-md-2 col-sm-6'),
css_class='row'
),
Div(
Field('key', wrapper_class='col-md-2 col-sm-4'),
Field('name', wrapper_class='col-md-4 col-sm-8'),
Field('year', wrapper_class='col-md-2 col-sm-3'),
Field('loops', wrapper_class='col-md-2 col-sm-3'),
css_class='row'
),
Div(
Field('description', wrapper_class='col', rows='2'),
Field('support', wrapper_class='col'),
css_class='row'
),
Div(
Field('fields', wrapper_class='col', rows='3'),
css_class='row'
)
)
self.helper.add_input(Submit('submit', 'Enviar'))
class Meta:
model = Goal
exclude = ['user', 'created', 'updated']
| 31.257576
| 68
| 0.517208
|
794a4abedaf6772fe72ada5d76078460687c97f7
| 559
|
py
|
Python
|
Session2_2019/deleteAndEarn.py
|
vedantc6/LCode
|
43aec4da9cc22ef43e877a16dbee380b98d9089f
|
[
"MIT"
] | 1
|
2018-09-21T10:51:15.000Z
|
2018-09-21T10:51:15.000Z
|
Session2_2019/deleteAndEarn.py
|
vedantc6/LCode
|
43aec4da9cc22ef43e877a16dbee380b98d9089f
|
[
"MIT"
] | null | null | null |
Session2_2019/deleteAndEarn.py
|
vedantc6/LCode
|
43aec4da9cc22ef43e877a16dbee380b98d9089f
|
[
"MIT"
] | null | null | null |
class Solution(object):
def deleteAndEarn(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
if len(nums) == 1:
return nums[0]
        # Reduce to a house-robber DP over values: taking value i earns
        # i * freqs[i] points but forbids taking i - 1 and i + 1.
        maxval = max(nums)
        freqs = [0]*(maxval + 1)
        sums = [0]*(maxval + 1)
        for val in nums:
            freqs[val] += 1
        sums[0] = freqs[0]
        sums[1] = freqs[1]
        for i in range(2, maxval+1):
            # either skip value i, or take all copies of i on top of sums[i-2]
            sums[i] = max(sums[i-2] + freqs[i]*i, sums[i-1])
return sums[maxval]
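# Quick sanity check (illustrative): Solution().deleteAndEarn([3, 4, 2]) == 6
# (taking the 4 deletes the 3, then taking the 2 earns 4 + 2 = 6).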
| 21.5
| 60
| 0.440072
|
794a4aef3004aa9ee4aafcb4aca32ea0bb255b64
| 343
|
py
|
Python
|
Desafios/Desafio010.py
|
OtavioCampagnoli/Aprendendo-Python
|
37bf341d3edbb5392c1ccf866ac0109d5905f68f
|
[
"MIT"
] | null | null | null |
Desafios/Desafio010.py
|
OtavioCampagnoli/Aprendendo-Python
|
37bf341d3edbb5392c1ccf866ac0109d5905f68f
|
[
"MIT"
] | null | null | null |
Desafios/Desafio010.py
|
OtavioCampagnoli/Aprendendo-Python
|
37bf341d3edbb5392c1ccf866ac0109d5905f68f
|
[
"MIT"
] | null | null | null |
# Create a program that reads how much money a person has in their wallet
# and shows how many US dollars they can buy.
# Exchange rate on 11/25/2021: $5.56.
real = float(input('Enter the amount of money you have in your wallet: '))
conversaoDolar: float = real / 5.56
print(f'The amount R${real:.2f} converted to dollars is ${conversaoDolar:.2f}')
| 57.166667
| 111
| 0.752187
|
794a4b58ff5e12a28b6d758a8e203c96ddfaa911
| 3,069
|
py
|
Python
|
src/orion/core/utils/singleton.py
|
obilaniu/orion
|
bc886daf791d66490b59e43657f6f6db45d34ea8
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T16:18:03.000Z
|
2021-04-10T16:18:03.000Z
|
src/orion/core/utils/singleton.py
|
obilaniu/orion
|
bc886daf791d66490b59e43657f6f6db45d34ea8
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/core/utils/singleton.py
|
obilaniu/orion
|
bc886daf791d66490b59e43657f6f6db45d34ea8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Singleton helpers and boilerplate
=================================
"""
from abc import ABCMeta
from orion.core.utils import Factory
class SingletonAlreadyInstantiatedError(ValueError):
"""Exception to be raised when someone provides arguments to build
    an object from an already-instantiated `SingletonType` class.
"""
def __init__(self, name):
"""Pass the same constant message to ValueError underneath."""
super().__init__(
"A singleton instance of (type: {}) has already been instantiated.".format(
name
)
)
class SingletonNotInstantiatedError(TypeError):
"""Exception to be raised when someone try to access an instance
of a singleton that has not been instantiated yet
"""
def __init__(self, name):
"""Pass the same constant message to TypeError underneath."""
super().__init__("No singleton instance of (type: {}) was created".format(name))
class SingletonType(type):
"""Metaclass that implements the singleton pattern for a Python class."""
def __init__(cls, name, bases, dictionary):
"""Create a class instance variable and initiate it to None object."""
super(SingletonType, cls).__init__(name, bases, dictionary)
cls.instance = None
def __call__(cls, *args, **kwargs):
"""Create an object if does not already exist, otherwise return what there is."""
if cls.instance is None:
try:
cls.instance = super(SingletonType, cls).__call__(*args, **kwargs)
except TypeError as exception:
raise SingletonNotInstantiatedError(cls.__name__) from exception
elif args or kwargs:
raise SingletonAlreadyInstantiatedError(cls.__name__)
return cls.instance
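# Minimal usage sketch (``Config`` is a hypothetical class, for illustration):
#
#     class Config(metaclass=SingletonType):
#         def __init__(self, path):
#             self.path = path
#
#     Config("a.yaml")   # the first call instantiates the singleton
#     Config()           # later calls return the same instance
#     Config("b.yaml")   # raises SingletonAlreadyInstantiatedError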
class AbstractSingletonType(SingletonType, ABCMeta):
"""This will create singleton base classes, that need to be subclassed and implemented."""
pass
class SingletonFactory(AbstractSingletonType, Factory):
"""Wrapping `orion.core.utils.Factory` with `SingletonType`. Keep compatibility with
`AbstractSingletonType`."""
pass
def update_singletons(values=None):
"""Replace singletons by given values and return previous singleton objects"""
if values is None:
values = {}
# Avoiding circular import problems when importing this module.
from orion.core.io.database import Database
from orion.core.io.database.ephemeraldb import EphemeralDB
from orion.core.io.database.mongodb import MongoDB
from orion.core.io.database.pickleddb import PickledDB
from orion.storage.base import Storage
from orion.storage.legacy import Legacy
from orion.storage.track import Track
singletons = (Storage, Legacy, Database, MongoDB, PickledDB, EphemeralDB, Track)
updated_singletons = {}
for singleton in singletons:
updated_singletons[singleton] = singleton.instance
singleton.instance = values.get(singleton, None)
return updated_singletons
| 33
| 94
| 0.687195
|
794a4b8acaef2b9ff3cf99eee2ba6720ecc65e25
| 12,076
|
py
|
Python
|
src/oscar/apps/customer/wishlists/views.py
|
capme/d-shp
|
b1614032f945ab82594729e177885784148f7605
|
[
"BSD-3-Clause"
] | 68
|
2016-11-06T05:07:57.000Z
|
2021-12-17T09:17:38.000Z
|
src/oscar/apps/customer/wishlists/views.py
|
capme/d-shp
|
b1614032f945ab82594729e177885784148f7605
|
[
"BSD-3-Clause"
] | 1
|
2017-07-28T19:35:07.000Z
|
2017-07-28T19:35:07.000Z
|
src/oscar/apps/customer/wishlists/views.py
|
capme/d-shp
|
b1614032f945ab82594729e177885784148f7605
|
[
"BSD-3-Clause"
] | 28
|
2016-12-04T07:12:50.000Z
|
2021-02-06T21:13:15.000Z
|
# -*- coding: utf-8 -*-
from django.contrib import messages
from django.core.exceptions import (
MultipleObjectsReturned, ObjectDoesNotExist, PermissionDenied)
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
CreateView, DeleteView, FormView, ListView, UpdateView, View)
from oscar.core.loading import get_class, get_classes, get_model
from oscar.core.utils import redirect_to_referrer, safe_referrer
WishList = get_model('wishlists', 'WishList')
Line = get_model('wishlists', 'Line')
Product = get_model('catalogue', 'Product')
WishListForm, LineFormset = get_classes('wishlists.forms',
['WishListForm', 'LineFormset'])
PageTitleMixin = get_class('customer.mixins', 'PageTitleMixin')
class WishListListView(PageTitleMixin, ListView):
context_object_name = active_tab = "wishlists"
template_name = 'customer/wishlists/wishlists_list.html'
page_title = _('Wish Lists')
def get_queryset(self):
return self.request.user.wishlists.all()
class WishListDetailView(PageTitleMixin, FormView):
"""
This view acts as a DetailView for a wish list and allows updating the
quantities of products.
    It is implemented as a FormView because it's easier to adapt a FormView to
    display a product than to adapt a DetailView to handle form validation.
"""
template_name = 'customer/wishlists/wishlists_detail.html'
active_tab = "wishlists"
form_class = LineFormset
def dispatch(self, request, *args, **kwargs):
self.object = self.get_wishlist_or_404(kwargs['key'], request.user)
return super(WishListDetailView, self).dispatch(request, *args,
**kwargs)
def get_wishlist_or_404(self, key, user):
wishlist = get_object_or_404(WishList, key=key)
if wishlist.is_allowed_to_see(user):
return wishlist
else:
raise Http404
def get_page_title(self):
return self.object.name
def get_form_kwargs(self):
kwargs = super(WishListDetailView, self).get_form_kwargs()
kwargs['instance'] = self.object
return kwargs
def get_context_data(self, **kwargs):
ctx = super(WishListDetailView, self).get_context_data(**kwargs)
ctx['wishlist'] = self.object
other_wishlists = self.request.user.wishlists.exclude(
pk=self.object.pk)
ctx['other_wishlists'] = other_wishlists
return ctx
def form_valid(self, form):
for subform in form:
if subform.cleaned_data['quantity'] <= 0:
subform.instance.delete()
else:
subform.save()
messages.success(self.request, _('Quantities updated.'))
return redirect('customer:wishlists-detail', key=self.object.key)
class WishListCreateView(PageTitleMixin, CreateView):
"""
Create a new wishlist
    If a product ID is passed as a kwarg, then this product will be added to
    the wish list.
"""
model = WishList
template_name = 'customer/wishlists/wishlists_form.html'
active_tab = "wishlists"
page_title = _('Create a new wish list')
form_class = WishListForm
product = None
def dispatch(self, request, *args, **kwargs):
if 'product_pk' in kwargs:
try:
self.product = Product.objects.get(pk=kwargs['product_pk'])
except ObjectDoesNotExist:
messages.error(
request, _("The requested product no longer exists"))
return redirect('wishlists-create')
return super(WishListCreateView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(WishListCreateView, self).get_context_data(**kwargs)
ctx['product'] = self.product
return ctx
def get_form_kwargs(self):
kwargs = super(WishListCreateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
wishlist = form.save()
if self.product:
wishlist.add(self.product)
msg = _("Your wishlist has been created and '%(name)s "
"has been added") \
% {'name': self.product.get_title()}
else:
msg = _("Your wishlist has been created")
messages.success(self.request, msg)
return redirect(wishlist.get_absolute_url())
class WishListCreateWithProductView(View):
"""
Create a wish list and immediately add a product to it
"""
def post(self, request, *args, **kwargs):
product = get_object_or_404(Product, pk=kwargs['product_pk'])
wishlists = request.user.wishlists.all()
if len(wishlists) == 0:
wishlist = request.user.wishlists.create()
else:
# This shouldn't really happen but we default to using the first
# wishlist for a user if one already exists when they make this
# request.
wishlist = wishlists[0]
wishlist.add(product)
messages.success(
request, _("%(title)s has been added to your wishlist") % {
'title': product.get_title()})
return redirect_to_referrer(request, wishlist.get_absolute_url())
class WishListUpdateView(PageTitleMixin, UpdateView):
model = WishList
template_name = 'customer/wishlists/wishlists_form.html'
active_tab = "wishlists"
form_class = WishListForm
context_object_name = 'wishlist'
def get_page_title(self):
return self.object.name
def get_object(self, queryset=None):
return get_object_or_404(WishList, owner=self.request.user,
key=self.kwargs['key'])
def get_form_kwargs(self):
kwargs = super(WishListUpdateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_success_url(self):
messages.success(
self.request, _("Your '%s' wishlist has been updated")
% self.object.name)
return reverse('customer:wishlists-list')
class WishListDeleteView(PageTitleMixin, DeleteView):
model = WishList
template_name = 'customer/wishlists/wishlists_delete.html'
active_tab = "wishlists"
def get_page_title(self):
return _(u'Delete %s') % self.object.name
def get_object(self, queryset=None):
return get_object_or_404(WishList, owner=self.request.user,
key=self.kwargs['key'])
def get_success_url(self):
messages.success(
self.request, _("Your '%s' wish list has been deleted")
% self.object.name)
return reverse('customer:wishlists-list')
class WishListAddProduct(View):
"""
Adds a product to a wish list.
- If the user doesn't already have a wishlist then it will be created for
them.
- If the product is already in the wish list, its quantity is increased.
"""
def dispatch(self, request, *args, **kwargs):
self.product = get_object_or_404(Product, pk=kwargs['product_pk'])
self.wishlist = self.get_or_create_wishlist(request, *args, **kwargs)
return super(WishListAddProduct, self).dispatch(request)
def get_or_create_wishlist(self, request, *args, **kwargs):
if 'key' in kwargs:
wishlist = get_object_or_404(
WishList, key=kwargs['key'], owner=request.user)
else:
wishlists = request.user.wishlists.all()[:1]
if not wishlists:
return request.user.wishlists.create()
wishlist = wishlists[0]
if not wishlist.is_allowed_to_edit(request.user):
raise PermissionDenied
return wishlist
def get(self, request, *args, **kwargs):
# This is nasty as we shouldn't be performing write operations on a GET
# request. It's only included as the UI of the product detail page
# allows a wishlist to be selected from a dropdown.
return self.add_product()
def post(self, request, *args, **kwargs):
return self.add_product()
def add_product(self):
self.wishlist.add(self.product)
msg = _("'%s' was added to your wish list.") % self.product.get_title()
messages.success(self.request, msg)
return redirect_to_referrer(
self.request, self.product.get_absolute_url())
class LineMixin(object):
"""
Handles fetching both a wish list and a product
Views using this mixin must be passed two keyword arguments:
* key: The key of a wish list
* line_pk: The primary key of the wish list line
or
* product_pk: The primary key of the product
"""
def fetch_line(self, user, wishlist_key, line_pk=None, product_pk=None):
self.wishlist = WishList._default_manager.get(
owner=user, key=wishlist_key)
if line_pk is not None:
self.line = self.wishlist.lines.get(pk=line_pk)
else:
self.line = self.wishlist.lines.get(product_id=product_pk)
self.product = self.line.product
class WishListRemoveProduct(LineMixin, PageTitleMixin, DeleteView):
template_name = 'customer/wishlists/wishlists_delete_product.html'
active_tab = "wishlists"
def get_page_title(self):
return _(u'Remove %s') % self.object.get_title()
def get_object(self, queryset=None):
self.fetch_line(
self.request.user, self.kwargs['key'],
self.kwargs.get('line_pk'), self.kwargs.get('product_pk'))
return self.line
def get_context_data(self, **kwargs):
ctx = super(WishListRemoveProduct, self).get_context_data(**kwargs)
ctx['wishlist'] = self.wishlist
ctx['product'] = self.product
return ctx
def get_success_url(self):
msg = _("'%(title)s' was removed from your '%(name)s' wish list") % {
'title': self.line.get_title(),
'name': self.wishlist.name}
messages.success(self.request, msg)
# We post directly to this view on product pages; and should send the
# user back there if that was the case
referrer = safe_referrer(self.request, '')
if (referrer and self.product and
self.product.get_absolute_url() in referrer):
return referrer
else:
return reverse(
'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
class WishListMoveProductToAnotherWishList(LineMixin, View):
def dispatch(self, request, *args, **kwargs):
try:
self.fetch_line(request.user, kwargs['key'],
line_pk=kwargs['line_pk'])
except (ObjectDoesNotExist, MultipleObjectsReturned):
raise Http404
return super(WishListMoveProductToAnotherWishList, self).dispatch(
request, *args, **kwargs)
def get(self, request, *args, **kwargs):
to_wishlist = get_object_or_404(
WishList, owner=request.user, key=kwargs['to_key'])
if to_wishlist.lines.filter(product=self.line.product).count() > 0:
msg = _("Wish list '%(name)s' already containing '%(title)s'") % {
'title': self.product.get_title(),
'name': to_wishlist.name}
messages.error(self.request, msg)
else:
self.line.wishlist = to_wishlist
self.line.save()
msg = _("'%(title)s' moved to '%(name)s' wishlist") % {
'title': self.product.get_title(),
'name': to_wishlist.name}
messages.success(self.request, msg)
default_url = reverse(
'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
return redirect_to_referrer(self.request, default_url)
| 36.264264
| 79
| 0.637628
|
794a4c0e10693385b05de46189e9aa2556806019
| 1,021
|
py
|
Python
|
management/wwwconfig.py
|
kiekerjan/mailinabox
|
acc9ebd68f351209b5fc895f50b2b998c9fb9d18
|
[
"CC0-1.0"
] | null | null | null |
management/wwwconfig.py
|
kiekerjan/mailinabox
|
acc9ebd68f351209b5fc895f50b2b998c9fb9d18
|
[
"CC0-1.0"
] | null | null | null |
management/wwwconfig.py
|
kiekerjan/mailinabox
|
acc9ebd68f351209b5fc895f50b2b998c9fb9d18
|
[
"CC0-1.0"
] | null | null | null |
import os.path, idna, sys, collections
def get_www_domains(domains_to_skip):
# Returns the domain names (IDNA-encoded) of all of the domains that are configured to serve www
# on the system.
domains = []
try:
		# read the configured www domains line by line
with open("/etc/miabwwwdomains.conf") as file_in:
for line in file_in:
				# Valid domain check future extension: use the validators module
# Only one dot allowed
if line.count('.') == 1:
www_domain = get_domain(line, as_unicode=False)
if www_domain not in domains_to_skip:
domains.append(www_domain)
	except OSError:
		# ignore a missing or unreadable config file
		pass
return set(domains)
def get_domain(domaintxt, as_unicode=True):
ret = domaintxt.rstrip()
if as_unicode:
try:
ret = idna.decode(ret.encode('ascii'))
except (ValueError, UnicodeError, idna.IDNAError):
pass
return ret
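# Illustrative round-trip (IDNA decoding only applies when as_unicode=True):
#   get_domain("xn--bcher-kva.example\n")        -> "bücher.example"
#   get_domain("xn--bcher-kva.example\n", False) -> "xn--bcher-kva.example"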
| 29.171429
| 100
| 0.5857
|
794a4c7f15dbe76816e3d257ddc55d4020a30450
| 2,052
|
py
|
Python
|
zeppelin_comm_layer/kernel.py
|
bernhard-42/zeppelin-ipython-shim
|
2358cb8e7be3fe6e25f9e44a5557b4d8c08d6607
|
[
"Apache-2.0"
] | 4
|
2017-04-06T17:28:13.000Z
|
2018-07-16T19:46:15.000Z
|
zeppelin_comm_layer/kernel.py
|
bernhard-42/zeppelin-ipython-shim
|
2358cb8e7be3fe6e25f9e44a5557b4d8c08d6607
|
[
"Apache-2.0"
] | null | null | null |
zeppelin_comm_layer/kernel.py
|
bernhard-42/zeppelin-ipython-shim
|
2358cb8e7be3fe6e25f9e44a5557b4d8c08d6607
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zeppelin_session import ZeppelinSession
from .comm_manager import ZeppelinCommManager
from .logger import Logger
class Kernel:
#
    # The session will be created or retrieved (zeppelin_session module) and a new
    # CommManager gets created.
#
def __init__(self, zeppelinContext, _logLen):
self.logger = Logger(self.__class__.__name__, size=_logLen).get()
self.logger.info("Create ZeppelinSession")
self.session = ZeppelinSession(zeppelinContext)
self.logger.info("Create CommManager")
self.comm_manager = ZeppelinCommManager()
def startSession(self, _tag):
self.logger.debug("Start ZeppelinSession %s" % self.getSessionId())
self.session.start(_tag)
def resetSession(self):
self.logger.debug("Reset ZeppelinSession %s" % self.getSessionId())
self.session._reset()
def getSessionId(self):
return self.session.sessionId
def registerFunction(self, name, jsFunc):
self.logger.debug("Register Function %s for ZeppelinSession %s" % (name, self.getSessionId()))
self.session.registerFunction(name, jsFunc)
def unregisterFunction(self, name):
self.logger.debug("Unregister Function %s for ZeppelinSession %s" % (name, self.getSessionId()))
self.session.unregisterFunction(name)
def send(self, task, msg):
self.session.call("__jupyterHandler", {"task":task, "msg":msg})
| 36
| 104
| 0.701267
|
794a4e496964e9a48da06c33e480266f34ae440f
| 15,757
|
py
|
Python
|
projects/tutorials/minigrid_tutorial.py
|
brandontrabucco/allenact
|
0f323ac6f67a84a9de76359f5506c44eff64e0a1
|
[
"MIT"
] | 187
|
2020-08-28T16:59:41.000Z
|
2022-03-27T19:10:11.000Z
|
projects/tutorials/minigrid_tutorial.py
|
brandontrabucco/allenact
|
0f323ac6f67a84a9de76359f5506c44eff64e0a1
|
[
"MIT"
] | 120
|
2020-08-28T15:30:36.000Z
|
2022-03-13T00:38:44.000Z
|
projects/tutorials/minigrid_tutorial.py
|
964728623/robothor_challenge_objnav21
|
f75ed98f7d5bcc87b460f0c13e24dafc18edc895
|
[
"MIT"
] | 45
|
2020-08-28T18:30:04.000Z
|
2022-03-29T11:13:28.000Z
|
# literate: tutorials/minigrid-tutorial.md
# %%
"""# Tutorial: Navigation in MiniGrid."""
# %%
"""
In this tutorial, we will train an agent to complete the `MiniGrid-Empty-Random-5x5-v0` task within the
[MiniGrid](https://github.com/maximecb/gym-minigrid) environment. We will demonstrate how to:
* Write an experiment configuration file with a simple training pipeline from scratch.
* Use one of the supported environments with minimal user effort.
* Train, validate and test your experiment from the command line.
This tutorial assumes the [installation instructions](../installation/installation-allenact.md) have already been
followed and that, to some extent, this framework's [abstractions](../getting_started/abstractions.md) are known.
The `extra_requirements` for `minigrid_plugin` and `babyai_plugin` can be installed with:
```bash
pip install -r allenact_plugins/minigrid_plugin/extra_requirements.txt; pip install -r allenact_plugins/babyai_plugin/extra_requirements.txt
```
## The task
A `MiniGrid-Empty-Random-5x5-v0` task consists of a grid of dimensions 5x5 where an agent spawned at a random
location and orientation has to navigate to the visitable bottom-right corner cell of the grid using sequences of three
possible actions (rotate left/right and move forward). A visualization of the environment with expert steps in a random
`MiniGrid-Empty-Random-5x5-v0` task looks like

The observation for the agent is a subset of the entire grid, simulating a simplified limited field of view, as
depicted by the highlighted rectangle (observed subset of the grid) around the agent (red arrow). Gray cells correspond
to walls.
## Experiment configuration file
Our complete experiment consists of:
* Training a basic actor-critic agent with memory to solve randomly sampled navigation tasks.
* Validation on a fixed set of tasks (running in parallel with training).
* A second stage where we test saved checkpoints with a larger fixed set of tasks.
The entire configuration for the experiment, including training, validation, and testing, is encapsulated in a single
class implementing the `ExperimentConfig` abstraction. For this tutorial, we will follow the config under
`projects/tutorials/minigrid_tutorial.py`.
The `ExperimentConfig` abstraction is used by the
[OnPolicyTrainer](../api/allenact/algorithms/onpolicy_sync/engine.md#onpolicytrainer) class (for training) and the
[OnPolicyInference](../api/allenact/algorithms/onpolicy_sync/engine.md#onpolicyinference) class (for validation and testing)
invoked through the entry script `main.py` that calls an orchestrating
[OnPolicyRunner](../api/allenact/algorithms/onpolicy_sync/runner.md#onpolicyrunner) class. It includes:
* A `tag` method to identify the experiment.
* A `create_model` method to instantiate actor-critic models.
* A `make_sampler_fn` method to instantiate task samplers.
* Three `{train,valid,test}_task_sampler_args` methods describing initialization parameters for task samplers used in
training, validation, and testing; including assignment of workers to devices for simulation.
* A `machine_params` method with configuration parameters that will be used for training, validation, and testing.
* A `training_pipeline` method describing a possibly multi-staged training pipeline with different types of losses,
an optimizer, and other parameters like learning rates, batch sizes, etc.
### Preliminaries
We first import everything we'll need to define our experiment.
"""
# %%
from typing import Dict, Optional, List, Any, cast
import gym
from gym_minigrid.envs import EmptyRandomEnv5x5
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO, PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from allenact_plugins.minigrid_plugin.minigrid_tasks import (
MiniGridTaskSampler,
MiniGridTask,
)
# %%
"""
We now create the `MiniGridTutorialExperimentConfig` class which we will use to define our experiment.
For pedagogical reasons, we will add methods to this class one at a time below, with a description of what
each method does.
"""
# %%
class MiniGridTutorialExperimentConfig(ExperimentConfig):
# %%
"""An experiment is identified by a `tag`."""
# %%
@classmethod
def tag(cls) -> str:
return "MiniGridTutorial"
# %%
"""
### Sensors and Model
A readily available Sensor type for MiniGrid,
[EgocentricMiniGridSensor](../api/allenact_plugins/minigrid_plugin/minigrid_sensors.md#egocentricminigridsensor),
allows us to extract observations in a format consumable by an `ActorCriticModel` agent:
"""
# %%
SENSORS = [
EgocentricMiniGridSensor(agent_view_size=5, view_channels=3),
]
# %%
"""
The three `view_channels` include objects, colors and states corresponding to a partial observation of the environment
as an image tensor, equivalent to that from `ImgObsWrapper` in
[MiniGrid](https://github.com/maximecb/gym-minigrid#wrappers). The
relatively large `agent_view_size` means the view will only be clipped by the environment walls in the forward and
lateral directions with respect to the agent's orientation.
We define our `ActorCriticModel` agent using a lightweight implementation with recurrent memory for MiniGrid
environments, [MiniGridSimpleConvRNN](../api/allenact_plugins/minigrid_plugin/minigrid_models.md#minigridsimpleconvrnn):
"""
# %%
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return MiniGridSimpleConvRNN(
action_space=gym.spaces.Discrete(len(MiniGridTask.class_action_names())),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
num_objects=cls.SENSORS[0].num_objects,
num_colors=cls.SENSORS[0].num_colors,
num_states=cls.SENSORS[0].num_states,
)
# %%
"""
### Task samplers
We use an available TaskSampler implementation for MiniGrid environments that allows us to sample both random and
deterministic `MiniGridTasks`,
[MiniGridTaskSampler](../api/allenact_plugins/minigrid_plugin/minigrid_tasks.md#minigridtasksampler):
"""
# %%
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return MiniGridTaskSampler(**kwargs)
# %%
"""
During training (or validation/testing), this task sampler randomly initializes new tasks for the agent to complete.
While this matters less for our task type (we test the agent in the same setting it is trained on), there are
good reasons to sample tasks differently during training than during validation or testing. One reason, applicable
in this tutorial, is that during training we would like to be able to sample tasks forever while, during testing, we
would like to sample a fixed number of tasks (as otherwise we would never finish testing!). In `allenact` this is
made possible by defining different arguments for the task sampler:
"""
# %%
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="train")
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="valid")
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="test")
# %%
"""
where, for convenience, we have defined a `_get_sampler_args` method:
"""
# %%
def _get_sampler_args(self, process_ind: int, mode: str) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 20 + 20 * (mode == "test") # 20 tasks for valid, 40 for test
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
max_tasks=max_tasks, # see above
env_class=self.make_env, # builder for third-party environment (defined below)
sensors=self.SENSORS, # sensors used to return observations to the agent
env_info=dict(), # parameters for environment builder (none for now)
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
)
@staticmethod
def make_env(*args, **kwargs):
return EmptyRandomEnv5x5()
# %%
"""
Note that the `env_class` argument to the task sampler determines which task type we are going to train the
model for (in this case, `MiniGrid-Empty-Random-5x5-v0` from
[gym-minigrid](https://github.com/maximecb/gym-minigrid#empty-environment)). The sparse reward is
[given by the environment](https://github.com/maximecb/gym-minigrid/blob/6e22a44dc67414b647063692258a4f95ce789161/gym_minigrid/minigrid.py#L819),
and the maximum task length is 100. For training, we opt for default random sampling, whereas for validation and
testing we define fixed sets of randomly sampled tasks without needing to explicitly define a dataset.
In this toy example, the maximum number of different tasks is 32. For validation we sample 320 tasks using 16 samplers,
or 640 for testing, so we can be fairly sure that all possible tasks are visited at least once during evaluation.
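As a quick back-of-the-envelope check of that count (our own arithmetic, not part of the config): the 5x5 grid has a
3x3 walkable interior, the goal occupies one corner cell, and the agent can face any of 4 directions from each of the
remaining cells:
```python
interior_cells = 3 * 3            # walkable cells inside the 5x5 walls
start_cells = interior_cells - 1  # the goal occupies one cell
orientations = 4
print(start_cells * orientations)  # 32 distinct tasks
```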
### Machine parameters
Given the simplicity of the task and model, we can quickly train the model on the CPU:
"""
# %%
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
return {
"nprocesses": 128 if mode == "train" else 16,
"devices": [],
}
# %%
"""
We allocate a larger number of samplers for training (128) than for validation or testing (16), and we default to CPU
usage by returning an empty list of `devices`.
### Training pipeline
The last definition required before starting to train is a training pipeline. In this case, we just use a single PPO
stage with linearly decaying learning rate:
"""
# %%
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
ppo_steps = int(150000)
return TrainingPipeline(
named_losses=dict(ppo_loss=PPO(**PPOConfig)), # type:ignore
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
],
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-4)),
num_mini_batch=4,
update_repeats=3,
max_grad_norm=0.5,
num_steps=16,
gamma=0.99,
use_gae=True,
gae_lambda=0.95,
advance_scene_rollout_period=None,
save_interval=10000,
metric_accumulate_interval=1,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} # type:ignore
),
)
# %%
"""
You can see that we use a `Builder` class to postpone the construction of some of the elements, like the optimizer,
for which the model weights need to be known.
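A minimal sketch of the deferred-construction pattern behind `Builder` (illustrative only; the actual class in
`allenact.utils.experiment_utils` is more featureful):
```python
class LazyBuilder:
    def __init__(self, cls, kwargs=None):
        self.cls, self.kwargs = cls, kwargs or {}

    def __call__(self, **late_kwargs):
        # construct only once late arguments (e.g. model parameters) exist
        return self.cls(**{**self.kwargs, **late_kwargs})

# opt_builder = LazyBuilder(optim.Adam, dict(lr=1e-4))
# optimizer = opt_builder(params=model.parameters())  # built after the model exists
```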
## Training and validation
We have a complete implementation of this experiment's configuration class in `projects/tutorials/minigrid_tutorial.py`.
To start training from scratch, we just need to invoke
```bash
PYTHONPATH=. python allenact/main.py minigrid_tutorial -b projects/tutorials -m 8 -o /PATH/TO/minigrid_output -s 12345
```
from the `allenact` root directory.
* With `-b projects/tutorials` we tell `allenact` that the `minigrid_tutorial` experiment config file
can be found in the `projects/tutorials` directory.
* With `-m 8` we limit the number of subprocesses to 8 (each subprocess will run 16 of the 128 training task samplers).
* With `-o /PATH/TO/minigrid_output` we set the output folder into which results and logs will be saved.
* With `-s 12345` we set the random seed.
If we have TensorBoard installed, we can track progress with
```bash
tensorboard --logdir /PATH/TO/minigrid_output
```
which will default to the URL [http://localhost:6006/](http://localhost:6006/).
After 150,000 steps, the script will terminate and several checkpoints will be saved in the output folder.
The training curves should look similar to:

If everything went well, the `valid` success rate should converge to 1 and the mean episode length to a value below 4.
(For perfectly uniform sampling and complete observation, the expectation for the optimal policy is 3.75 steps.) In the
not-so-unlikely event of the run failing to converge to a near-optimal policy, we can just try to re-run (for example
with a different random seed). The validation curves should look similar to:

## Testing
The training start date for the experiment, in `YYYY-MM-DD_HH-MM-SS` format, is used as the name of one of the
subfolders in the path to the checkpoints, saved under the output folder.
In order to evaluate (i.e. test) a particular checkpoint, we need to pass the `--eval` flag and specify the checkpoint with the
`--checkpoint CHECKPOINT_PATH` option:
```bash
PYTHONPATH=. python allenact/main.py minigrid_tutorial \
-b projects/tutorials \
-m 1 \
-o /PATH/TO/minigrid_output \
-s 12345 \
--eval \
--checkpoint /PATH/TO/minigrid_output/checkpoints/MiniGridTutorial/YOUR_START_DATE/exp_MiniGridTutorial__stage_00__steps_000000151552.pt
```
Again, if everything went well, the `test` success rate should converge to 1 and the mean episode length to a value
below 4. Detailed results are saved under a `metrics` subfolder in the output folder.
The test curves should look similar to:

"""
| 42.471698
| 148
| 0.71822
|
794a4f88b3d2030dc741eee4fb322599a6148f76
| 5,257
|
py
|
Python
|
examples/networking/simulation.py
|
gtataranni/bcc
|
b090f5f9eee62796829184ec862e3378a3b7e425
|
[
"Apache-2.0"
] | 58
|
2015-08-28T08:46:35.000Z
|
2022-02-27T14:31:55.000Z
|
examples/networking/simulation.py
|
gtataranni/bcc
|
b090f5f9eee62796829184ec862e3378a3b7e425
|
[
"Apache-2.0"
] | 9
|
2021-07-29T21:15:28.000Z
|
2022-02-16T18:17:49.000Z
|
examples/networking/simulation.py
|
gtataranni/bcc
|
b090f5f9eee62796829184ec862e3378a3b7e425
|
[
"Apache-2.0"
] | 12
|
2017-02-28T02:50:31.000Z
|
2021-07-26T17:54:07.000Z
|
import os
import subprocess
import pyroute2
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
class Simulation(object):
"""
Helper class for controlling multiple namespaces. Inherit from
this class and setup your namespaces.
"""
def __init__(self, ipdb):
self.ipdb = ipdb
self.ipdbs = {}
self.namespaces = []
self.processes = []
self.released = False
# helper function to add additional ifc to namespace
# if called directly outside Simulation class, "ifc_base_name" should be
# different from "name", the "ifc_base_name" and "name" are the same for
# the first ifc created by namespace
def _ns_add_ifc(self, name, ns_ifc, ifc_base_name=None, in_ifc=None,
out_ifc=None, ipaddr=None, macaddr=None, fn=None, cmd=None,
action="ok", disable_ipv6=False):
if name in self.ipdbs:
ns_ipdb = self.ipdbs[name]
else:
try:
nl = NetNS(name)
self.namespaces.append(nl)
except KeyboardInterrupt:
# remove the namespace if it has been created
pyroute2.netns.remove(name)
raise
ns_ipdb = IPDB(nl)
self.ipdbs[nl.netns] = ns_ipdb
if disable_ipv6:
cmd1 = ["sysctl", "-q", "-w",
"net.ipv6.conf.default.disable_ipv6=1"]
nsp = NSPopen(ns_ipdb.nl.netns, cmd1)
nsp.wait(); nsp.release()
try:
ns_ipdb.interfaces.lo.up().commit()
except pyroute2.ipdb.exceptions.CommitException:
print("Warning, commit for lo failed, operstate may be unknown")
if in_ifc:
in_ifname = in_ifc.ifname
with in_ifc as v:
# move half of veth into namespace
v.net_ns_fd = ns_ipdb.nl.netns
else:
# delete any potentially left-over veth interfaces
ipr = IPRoute()
for i in ipr.link_lookup(ifname='%sa' % ifc_base_name): ipr.link("del", index=i)
ipr.close()
try:
out_ifc = self.ipdb.create(ifname="%sa" % ifc_base_name, kind="veth",
peer="%sb" % ifc_base_name).commit()
in_ifc = self.ipdb.interfaces[out_ifc.peer]
in_ifname = in_ifc.ifname
with in_ifc as v:
v.net_ns_fd = ns_ipdb.nl.netns
except KeyboardInterrupt:
# explicitly remove the interface
out_ifname = "%sa" % ifc_base_name
if out_ifname in self.ipdb.interfaces: self.ipdb.interfaces[out_ifname].remove().commit()
raise
if out_ifc: out_ifc.up().commit()
try:
# this is a workaround for fc31 and possibly other distros:
# when interface 'lo' is already up, doing another 'up().commit()'
# has issues in fc31.
# the workaround may become permanent if we upgrade pyroute2
# in all machines.
if 'state' in ns_ipdb.interfaces.lo.keys():
if ns_ipdb.interfaces.lo['state'] != 'up':
ns_ipdb.interfaces.lo.up().commit()
else:
ns_ipdb.interfaces.lo.up().commit()
except pyroute2.ipdb.exceptions.CommitException:
print("Warning, commit for lo failed, operstate may be unknown")
ns_ipdb.initdb()
in_ifc = ns_ipdb.interfaces[in_ifname]
with in_ifc as v:
v.ifname = ns_ifc
if ipaddr: v.add_ip("%s" % ipaddr)
if macaddr: v.address = macaddr
v.up()
if disable_ipv6:
cmd1 = ["sysctl", "-q", "-w",
"net.ipv6.conf.%s.disable_ipv6=1" % out_ifc.ifname]
subprocess.call(cmd1)
if fn and out_ifc:
self.ipdb.nl.tc("add", "ingress", out_ifc["index"], "ffff:")
self.ipdb.nl.tc("add-filter", "bpf", out_ifc["index"], ":1",
fd=fn.fd, name=fn.name, parent="ffff:",
action=action, classid=1)
if cmd:
self.processes.append(NSPopen(ns_ipdb.nl.netns, cmd))
return (ns_ipdb, out_ifc, in_ifc)
# helper function to create a namespace and a veth connecting it
def _create_ns(self, name, in_ifc=None, out_ifc=None, ipaddr=None,
macaddr=None, fn=None, cmd=None, action="ok", disable_ipv6=False):
(ns_ipdb, out_ifc, in_ifc) = self._ns_add_ifc(name, "eth0", name, in_ifc, out_ifc,
ipaddr, macaddr, fn, cmd, action,
disable_ipv6)
return (ns_ipdb, out_ifc, in_ifc)
def release(self):
if self.released: return
self.released = True
for p in self.processes:
if p.released: continue
try:
p.kill()
p.wait()
except:
pass
finally:
p.release()
for name, db in self.ipdbs.items(): db.release()
for ns in self.namespaces: ns.remove()
| 41.393701
| 105
| 0.536618
|
794a4fc247ed6508872ef32a9f9e4cc7cbe952c6
| 653
|
py
|
Python
|
c++调用py/test2模板/testpy.py
|
keetsky/c_c-_python_mutprog
|
f918db6ef1624b8c16efe2b4d384dd4c6f72d3dd
|
[
"Apache-2.0"
] | 3
|
2021-01-26T07:52:50.000Z
|
2021-11-25T11:28:36.000Z
|
c++调用py/test2模板/testpy.py
|
keetsky/c_c-_python_mutprog
|
f918db6ef1624b8c16efe2b4d384dd4c6f72d3dd
|
[
"Apache-2.0"
] | null | null | null |
c++调用py/test2模板/testpy.py
|
keetsky/c_c-_python_mutprog
|
f918db6ef1624b8c16efe2b4d384dd4c6f72d3dd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Filename: test.py
print("start c++_py:")
int_data = 5
def HelloWorld():
print ("Hello World!")
def sayhi(name):
print ('hi', name)
return name
def add(a, b):
return a+b
def AddMult(a,b):
print("addmuilt in python")
return (a+b,a*b)
def TestDict(dict):
print (dict)
dict["Age"] = 17
return dict
class Person:
def sayHi(self,):
print ('hi')
def greet(self, greetStr):
print (greetStr)
return greetStr + "babby"
# Chinese words cannot be present here, otherwise an error occurs
class Second:
def invoke(self,obj):
obj.sayHi()
| 20.40625
| 59
| 0.578867
|
794a4fc80646101cbe626d9214c8b46b281568c6
| 3,618
|
py
|
Python
|
gallery/tests.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
gallery/tests.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
gallery/tests.py
|
gabyxbinnaeah/Photo-Gallery
|
6155df3a70d0955a01e6f2257789076c6a85abf4
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Location,Category,Image
# Create your tests here.
class LocationTestClass(TestCase):
def setUp(self):
'''
method that creates an instance of Location
'''
self.bondo = Location(name = 'bondo')
def test_instance(self):
'''
method that tests if an instance of Location is generated
'''
self.assertTrue(isinstance(self.bondo, Location))
def test_save_location(self):
'''
function that tests if a location is saved
'''
self.bondo.save_location()
searched_locations = Location.objects.all()
self.assertTrue(len(searched_locations ) >0)
def test_delete_location(self):
'''
function that tests if a location can be deleted
'''
self.bondo.save_location()
self.bondo.delete_location()
found_location=Location.objects.all()
self.assertTrue(len(found_location)==0)
def test_update_location(self):
'''
method that tests if a location can be updated
'''
self.bondo.save_location()
self.bondo.update_location(self.bondo.id,'Nairobi')
location_list=Location.objects.all()
self.assertTrue(len(location_list)==1)
updated_object=Location.objects.all().first()
self.assertTrue(updated_object.name=='Nairobi')
class CategoryTestClass(TestCase):
def setUp(self):
'''
method that creates an instance of Category
'''
self.large = Category(name = 'large')
def test_instance(self):
'''
method that tests if an instance of Category is generated
'''
self.assertTrue(isinstance(self.large, Category))
def test_save_category(self):
'''
function that tests if a category is saved
'''
self.large.save_category()
searched_category = Category.objects.all()
self.assertTrue(len(searched_category ) >0)
def test_delete_category(self):
'''
function that tests if a category can be deleted
'''
self.large.save_category()
self.large.delete_category()
found_category=Category.objects.all()
self.assertTrue(len(found_category)==0)
def test_update_category(self):
'''
method that tests if a category can be updated
'''
self.large.save_category()
self.large.update_category(self.large.id,'small')
returned_category_list=Category.objects.all()
self.assertTrue(len(returned_category_list)==1)
updated_category_object=Category.objects.all().first()
self.assertTrue(updated_category_object.name=='small')
class ImageTestClass(TestCase):
def setUp(self):
'''
method that creates an instance of Image whenever a test is run
'''
self.tech= Image (name='tech',description='network topology')
def test_instance(self):
'''
method that tests if an instance of Image is created
'''
self.assertTrue(isinstance(self.tech, Image))
def test_save_image(self):
'''
function that checks if an image is saved
'''
self.tech.save_images()
searched_image = Image.objects.all()
self.assertTrue(len(searched_image) >0)
def test_delete_image(self):
'''
method that checks if an image can be deleted
'''
self.tech.save_images()
self.tech.delete_image()
found_after_delete=Image.objects.all()
self.assertTrue(len(found_after_delete)==0)
| 30.661017
| 69
| 0.621338
|
794a4fcac056b97fa04f6488930308a027a50a02
| 2,421
|
py
|
Python
|
tools/test_init.py
|
moranxiachong/PersonReID-VAAL
|
86948ef70793455487cd61709486653827e51bda
|
[
"Apache-2.0"
] | 2
|
2022-01-03T07:34:49.000Z
|
2022-01-19T08:42:56.000Z
|
tools/test_init.py
|
moranxiachong/PersonReID-VAAL
|
86948ef70793455487cd61709486653827e51bda
|
[
"Apache-2.0"
] | null | null | null |
tools/test_init.py
|
moranxiachong/PersonReID-VAAL
|
86948ef70793455487cd61709486653827e51bda
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import argparse
import os
import sys
from os import mkdir
import torch
from torch.backends import cudnn
sys.path.append('.')
from config import cfg
from data import make_data_loader
from engine.inference import inference
from modeling import build_model
from utils.logger import setup_logger
import functions
def main():
parser = argparse.ArgumentParser(description="ReID Baseline Inference")
parser.add_argument(
"--config_file", default="", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
mkdir(output_dir)
logger = setup_logger("reid_baseline", output_dir, 0)
logger.info("Using {} GPUS".format(num_gpus))
logger.info(args)
if args.config_file != "":
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, 'r') as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
if cfg.MODEL.DEVICE == "cuda":
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
#train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
#model = build_model(cfg, num_classes)
#model.load_param(cfg.TEST.WEIGHT)
train_loader, val_loader, num_query, num_classes, num_classes2, image_map_label2 = make_data_loader(cfg)
model = build_model(cfg, num_classes, num_classes2)
print('--- resume from ', cfg.MODEL.PRETRAIN_PATH2)
if cfg.MODEL.ONCE_LOAD == 'yes':
print('\n---ONCE_LOAD...\n')
model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH2, map_location=lambda storage, loc: storage))
else:
functions.load_state_dict(model, cfg.MODEL.PRETRAIN_PATH2, cfg.MODEL.ONLY_BASE, cfg.MODEL.WITHOUT_FC)
inference(cfg, model, val_loader, num_query)
if __name__ == '__main__':
main()
| 31.441558
| 110
| 0.697233
|
794a50118ef7a5118db2b8d9956a8ca85e2c2278
| 2,436
|
py
|
Python
|
quik/preprocess.py
|
alexanderarcha95/py2quik
|
cd2933bc52ac3876dddce32bb17f33323a3eb60f
|
[
"Apache-2.0"
] | null | null | null |
quik/preprocess.py
|
alexanderarcha95/py2quik
|
cd2933bc52ac3876dddce32bb17f33323a3eb60f
|
[
"Apache-2.0"
] | null | null | null |
quik/preprocess.py
|
alexanderarcha95/py2quik
|
cd2933bc52ac3876dddce32bb17f33323a3eb60f
|
[
"Apache-2.0"
] | 1
|
2021-11-06T08:35:48.000Z
|
2021-11-06T08:35:48.000Z
|
import numpy as np
import pandas as pd
from sklearn import preprocessing as prep
from sklearn.preprocessing import MinMaxScaler
from collections import deque
from quik import prices
import random
def classify(current,future,thres = 100): # returns 2 (rise > thres), 1 (rise <= thres), -1 (fall <= thres), -2 (fall > thres)
diff = (float(future) - float(current))
if diff >= 0:
if diff > thres:
return 2
else:
return 1
if diff < 0:
if diff < -thres:
return -2
else:
return -1
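# e.g. (illustrative values) classify(100, 250) -> 2, classify(100, 150) -> 1,
# classify(100, 40) -> -1, classify(100, -50) -> -2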
def classify_binary(current,future): # returns 1 when the price rises, else 0
if float(future) > float(current):
return 1
else:
return 0
def preprocessing(df, SEQ_LEN = 500):
df = df.reset_index()
# Drop future values and targets
df = df.drop(columns="future")
target = df["target"] # keep target in a separate pd.Series before dropping it
df = df.drop(columns="target")
print('Dropping is done')
# Data as a changes
df = df + 1
df = df.pct_change()
print('Data as a changes')
# Scale from 0 to 1
min_max_scaler = MinMaxScaler()
df = min_max_scaler.fit_transform(df)
print('Scaled from 0 to 1')
# Adding target to rescaled DataFrame
df = pd.DataFrame(df)
df["target"] = target
df = df.dropna()
print("Added target to rescaled DataFrame")
# Creating sequences
sequential_data = []
#Filling list with sequential data
for i in range(0,len(df)):
if (i + SEQ_LEN) < len(df):
print(i,i+SEQ_LEN)
sequential_data.append([np.array(df.iloc[:,0:6][i:i+SEQ_LEN]), df["target"][i+SEQ_LEN-1:i+SEQ_LEN].values])
print("Filled sequential data")
#Data is shuffled
random.shuffle(sequential_data)
#Separating X and y
X,y = [],[]
for seq, target in sequential_data:
X.append(seq)
y.append(target)
print("All is done")
return np.array(X), np.array(y)
def get_training_data(lag=500,size = None):
df = prices.training_data(lag = lag)[:size] # Run function
df['target'] = list(map(classify_binary, df['price_rts'], df['future']))
return preprocessing(df) # Returns X and y
if __name__ == "__main__":
X,y = get_training_data(lag = 500)
print(X,y)
| 19.645161
| 120
| 0.582923
|
794a501754f22508fd193edd78c318b84b153a86
| 1,839
|
py
|
Python
|
reinvent-2019/rhythm-cloud/lib/ABElectronics_Python_Libraries/ExpanderPi/demos/demo_adcspeed.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 2
|
2019-12-17T03:38:38.000Z
|
2021-05-28T06:23:58.000Z
|
reinvent-2019/rhythm-cloud/lib/ABElectronics_Python_Libraries/ExpanderPi/demos/demo_adcspeed.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 8
|
2021-05-09T06:05:46.000Z
|
2022-03-02T09:53:20.000Z
|
reinvent-2019/rhythm-cloud/lib/ABElectronics_Python_Libraries/ExpanderPi/demos/demo_adcspeed.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 3
|
2020-09-30T18:46:59.000Z
|
2020-10-21T21:20:26.000Z
|
#!/usr/bin/env python
"""
================================================
# ABElectronics Expander Pi | ADC Speed Demo
#
# Requires python smbus to be installed
# For Python 2 install with: sudo apt-get install python-smbus
# For Python 3 install with: sudo apt-get install python3-smbus
#
# run with: python demo_adcspeed.py
================================================
this demo tests the maximum sample speed for the ADC
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import numpy as N
try:
import ExpanderPi
except ImportError:
print("Failed to import ExpanderPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append('..')
import ExpanderPi
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
def main():
'''
Main program function
'''
adc = ExpanderPi.ADC() # create an instance of the ADC
# set the reference voltage. this should be set to the exact voltage
# measured on the Expander Pi Vref pin.
adc.set_adc_refvoltage(4.096)
counter = 0  # start at zero so exactly `totalsamples` readings are taken
totalsamples = 100000
readarray = N.zeros(totalsamples)
starttime = datetime.datetime.now()
print("Start: " + str(starttime))
while counter < totalsamples:
# read the voltage from channel 1 and display on the screen
readarray[counter] = adc.read_adc_voltage(1, 0)
counter = counter + 1
endtime = datetime.datetime.now()
print("End: " + str(endtime))
totalseconds = (endtime - starttime).total_seconds()
samplespersecond = totalsamples / totalseconds
print("%.2f samples per second" % samplespersecond)
if __name__ == "__main__":
main()
| 24.52
| 73
| 0.637847
|
794a51c5673e74a2a5fce127ddc3185ecf3b3af6
| 5,460
|
py
|
Python
|
sympy/concrete/gosper.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2021-01-09T23:11:25.000Z
|
2021-01-11T15:04:22.000Z
|
sympy/concrete/gosper.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2020-08-18T15:21:59.000Z
|
2020-08-18T19:35:29.000Z
|
sympy/concrete/gosper.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2021-01-08T23:03:23.000Z
|
2021-01-13T18:57:02.000Z
|
"""Gosper's algorithm for hypergeometric summation. """
from sympy.core import S, Dummy, symbols
from sympy.core.compatibility import is_sequence
from sympy.polys import Poly, parallel_poly_from_expr, factor
from sympy.solvers import solve
from sympy.simplify import hypersimp
def gosper_normal(f, g, n, polys=True):
r"""
Compute the Gosper's normal form of ``f`` and ``g``.
Given relatively prime univariate polynomials ``f`` and ``g``,
rewrite their quotient to a normal form defined as follows:
.. math::
\frac{f(n)}{g(n)} = Z \cdot \frac{A(n) C(n+1)}{B(n) C(n)}
where ``Z`` is an arbitrary constant and ``A``, ``B``, ``C`` are
monic polynomials in ``n`` with the following properties:
1. `\gcd(A(n), B(n+h)) = 1 \forall h \in \mathbb{N}`
2. `\gcd(B(n), C(n+1)) = 1`
3. `\gcd(A(n), C(n)) = 1`
This normal form, or rational factorization in other words, is a
crucial step in Gosper's algorithm and in solving of difference
equations. It can be also used to decide if two hypergeometric
terms are similar or not.
This procedure will return a tuple containing elements of this
factorization in the form ``(Z*A, B, C)``.
Examples
========
>>> from sympy.concrete.gosper import gosper_normal
>>> from sympy.abc import n
>>> gosper_normal(4*n+5, 2*(4*n+1)*(2*n+3), n, polys=False)
(1/4, n + 3/2, n + 1/4)
"""
(p, q), opt = parallel_poly_from_expr(
(f, g), n, field=True, extension=True)
a, A = p.LC(), p.monic()
b, B = q.LC(), q.monic()
C, Z = A.one, a/b
h = Dummy('h')
D = Poly(n + h, n, h, domain=opt.domain)
R = A.resultant(B.compose(D))
roots = set(R.ground_roots().keys())
for r in set(roots):
if not r.is_Integer or r < 0:
roots.remove(r)
for i in sorted(roots):
d = A.gcd(B.shift(+i))
A = A.quo(d)
B = B.quo(d.shift(-i))
for j in range(1, i + 1):
C *= d.shift(-j)
A = A.mul_ground(Z)
if not polys:
A = A.as_expr()
B = B.as_expr()
C = C.as_expr()
return A, B, C
def gosper_term(f, n):
r"""
Compute Gosper's hypergeometric term for ``f``.
Suppose ``f`` is a hypergeometric term such that:
.. math::
s_n = \sum_{k=0}^{n-1} f_k
and `f_k` doesn't depend on `n`. Returns a hypergeometric
term `g_n` such that `g_{n+1} - g_n = f_n`.
Examples
========
>>> from sympy.concrete.gosper import gosper_term
>>> from sympy.functions import factorial
>>> from sympy.abc import n
>>> gosper_term((4*n + 1)*factorial(n)/factorial(2*n + 1), n)
(-n - 1/2)/(n + 1/4)
"""
r = hypersimp(f, n)
if r is None:
return None # 'f' is *not* a hypergeometric term
p, q = r.as_numer_denom()
A, B, C = gosper_normal(p, q, n)
B = B.shift(-1)
N = S(A.degree())
M = S(B.degree())
K = S(C.degree())
if (N != M) or (A.LC() != B.LC()):
D = {K - max(N, M)}
elif not N:
D = {K - N + 1, S.Zero}
else:
D = {K - N + 1, (B.nth(N - 1) - A.nth(N - 1))/A.LC()}
for d in set(D):
if not d.is_Integer or d < 0:
D.remove(d)
if not D:
return None # 'f(n)' is *not* Gosper-summable
d = max(D)
coeffs = symbols('c:%s' % (d + 1), cls=Dummy)
domain = A.get_domain().inject(*coeffs)
x = Poly(coeffs, n, domain=domain)
H = A*x.shift(1) - B*x - C
solution = solve(H.coeffs(), coeffs)
if solution is None:
return None # 'f(n)' is *not* Gosper-summable
x = x.as_expr().subs(solution)
for coeff in coeffs:
if coeff not in solution:
x = x.subs(coeff, 0)
if x.is_zero:
return None # 'f(n)' is *not* Gosper-summable
else:
return B.as_expr()*x/C.as_expr()
def gosper_sum(f, k):
r"""
Gosper's hypergeometric summation algorithm.
Given a hypergeometric term ``f`` such that:
.. math ::
s_n = \sum_{k=0}^{n-1} f_k
and `f(n)` doesn't depend on `n`, returns `g_{n} - g(0)` where
`g_{n+1} - g_n = f_n`, or ``None`` if `s_n` can not be expressed
in closed form as a sum of hypergeometric terms.
Examples
========
>>> from sympy.concrete.gosper import gosper_sum
>>> from sympy.functions import factorial
>>> from sympy.abc import n, k
>>> f = (4*k + 1)*factorial(k)/factorial(2*k + 1)
>>> gosper_sum(f, (k, 0, n))
(-factorial(n) + 2*factorial(2*n + 1))/factorial(2*n + 1)
>>> _.subs(n, 2) == sum(f.subs(k, i) for i in [0, 1, 2])
True
>>> gosper_sum(f, (k, 3, n))
(-60*factorial(n) + factorial(2*n + 1))/(60*factorial(2*n + 1))
>>> _.subs(n, 5) == sum(f.subs(k, i) for i in [3, 4, 5])
True
References
==========
.. [1] Marko Petkovsek, Herbert S. Wilf, Doron Zeilberger, A = B,
AK Peters, Ltd., Wellesley, MA, USA, 1997, pp. 73--100
"""
indefinite = False
if is_sequence(k):
k, a, b = k
else:
indefinite = True
g = gosper_term(f, k)
if g is None:
return None
if indefinite:
result = f*g
else:
result = (f*(g + 1)).subs(k, b) - (f*g).subs(k, a)
if result is S.NaN:
try:
result = (f*(g + 1)).limit(k, b) - (f*g).limit(k, a)
except NotImplementedError:
result = None
return factor(result)
| 24.931507
| 69
| 0.540293
|
794a52a0b427ace27050e2cf5f8987317a4309d4
| 3,011
|
py
|
Python
|
qiskit/visualization/tools/pi_check.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | 1
|
2019-10-14T00:59:19.000Z
|
2019-10-14T00:59:19.000Z
|
qiskit/visualization/tools/pi_check.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | null | null | null |
qiskit/visualization/tools/pi_check.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Check if number close to values of PI
"""
import numpy as np
from qiskit.exceptions import QiskitError
N, D = np.meshgrid(np.arange(1, 9), np.arange(1, 9))
FRAC_MESH = N / D * np.pi
def pi_check(inpt, eps=1e-6, output='text', ndigits=5):
""" Computes if a number is close to an integer
fraction or multiple of PI and returns the
corresponding string.
Args:
inpt (float): Number to check.
eps (float): EPS to check against.
output (str): Options are 'text' (default),
'latex', and 'mpl'.
ndigits (int): Number of digits to print
if returning raw inpt.
Returns:
str: string representation of output.
Raises:
QiskitError: if output is not a valid option.
"""
inpt = float(inpt)
if abs(inpt) < 1e-14:
return str(0)
val = inpt / np.pi
if output == 'text':
pi = 'pi'
elif output == 'latex':
pi = '\\pi'
elif output == 'mpl':
pi = '$\\pi$'
else:
raise QiskitError('pi_check parameter output should be text, latex, or mpl')
if abs(val) >= 1:
if abs(val % 1) < eps:
val = int(round(val))
if val == 1:
str_out = '{}'.format(pi)
elif val == -1:
str_out = '-{}'.format(pi)
else:
str_out = '{}{}'.format(val, pi)
return str_out
val = np.pi / inpt
if abs(abs(val) - abs(round(val))) < eps:
val = int(round(val))
if val > 0:
str_out = '{}/{}'.format(pi, val)
else:
str_out = '-{}/{}'.format(pi, abs(val))
return str_out
# Look for all fracs in 8
abs_val = abs(inpt)
frac = np.where(np.abs(abs_val - FRAC_MESH) < 1e-8)
if frac[0].shape[0]:
numer = int(frac[1][0]) + 1
denom = int(frac[0][0]) + 1
if inpt < 0:
numer *= -1
if numer == 1 and denom == 1:
str_out = '{}'.format(pi)
elif numer == -1 and denom == 1:
str_out = '-{}'.format(pi)
elif numer == 1:
str_out = '{}/{}'.format(pi, denom)
elif numer == -1:
str_out = '-{}/{}'.format(pi, denom)
elif denom == 1:
# an integer multiple of pi, not a fraction of it
str_out = '{}{}'.format(numer, pi)
else:
str_out = '{}{}/{}'.format(numer, pi, denom)
return str_out
# nothing found
str_out = '%.{}g'.format(ndigits) % inpt
return str_out
| 28.67619
| 84
| 0.536367
|
794a5348293cf204a0aa8804abdb4ee00844448e
| 5,140
|
py
|
Python
|
test.py
|
RogerZhangzz/CAG_UDA
|
422f99e2e0a5cb26a40d4f17ee5832f81580f7f0
|
[
"MIT"
] | 126
|
2019-10-30T00:58:02.000Z
|
2022-01-26T06:29:10.000Z
|
test.py
|
liyongsheng-tech/CAG_UDA
|
422f99e2e0a5cb26a40d4f17ee5832f81580f7f0
|
[
"MIT"
] | 14
|
2019-11-05T15:10:22.000Z
|
2022-02-08T09:05:53.000Z
|
test.py
|
liyongsheng-tech/CAG_UDA
|
422f99e2e0a5cb26a40d4f17ee5832f81580f7f0
|
[
"MIT"
] | 26
|
2019-12-02T09:41:11.000Z
|
2022-01-29T10:46:41.000Z
|
import os
import sys
import yaml
import time
import shutil
import torch
import random
import argparse
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torchvision.models as models
# import torchvision
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from PIL import Image
# from visdom import Visdom
_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'utils')
sys.path.append(_path)
from torch.utils import data
from tqdm import tqdm
from data import create_dataset
from models import create_model
from utils.utils import get_logger
from augmentations import get_composed_augmentations
from models.adaptation_model import CustomModel, CustomMetrics
from optimizers import get_optimizer
from schedulers import get_scheduler
from metrics import runningScore, averageMeter
from loss import get_loss_function
from utils import sync_batchnorm
from tensorboardX import SummaryWriter
def test(cfg, writer, logger):
torch.manual_seed(cfg.get('seed', 1337))
torch.cuda.manual_seed(cfg.get('seed', 1337))
np.random.seed(cfg.get('seed', 1337))
random.seed(cfg.get('seed', 1337))
## create dataset
default_gpu = cfg['model']['default_gpu']
device = torch.device("cuda:{}".format(default_gpu) if torch.cuda.is_available() else 'cpu')
datasets = create_dataset(cfg, writer, logger) #source_train\ target_train\ source_valid\ target_valid + _loader
model = CustomModel(cfg, writer, logger)
running_metrics_val = runningScore(cfg['data']['target']['n_class'])
source_running_metrics_val = runningScore(cfg['data']['target']['n_class'])
val_loss_meter = averageMeter()
source_val_loss_meter = averageMeter()
time_meter = averageMeter()
loss_fn = get_loss_function(cfg)
path = cfg['test']['path']
checkpoint = torch.load(path)
model.adaptive_load_nets(model.BaseNet, checkpoint['DeepLab']['model_state'])
validation(
model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,\
source_val_loss_meter, source_running_metrics_val, iters = model.iter
)
def validation(model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,\
source_val_loss_meter, source_running_metrics_val, iters):
iters = iters
_k = -1
model.eval(logger=logger)
torch.cuda.empty_cache()
with torch.no_grad():
validate(
datasets.target_valid_loader, device, model, running_metrics_val,
val_loss_meter, loss_fn
)
writer.add_scalar('loss/val_loss', val_loss_meter.avg, iters+1)
logger.info("Iter %d Loss: %.4f" % (iters + 1, val_loss_meter.avg))
writer.add_scalar('loss/source_val_loss', source_val_loss_meter.avg, iters+1)
logger.info("Iter %d Source Loss: %.4f" % (iters + 1, source_val_loss_meter.avg))
score, class_iou = running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/{}'.format(k), v, iters+1)
for k, v in class_iou.items():
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/cls_{}'.format(k), v, iters+1)
val_loss_meter.reset()
running_metrics_val.reset()
source_val_loss_meter.reset()
source_running_metrics_val.reset()
torch.cuda.empty_cache()
return score["Mean IoU : \t"]
def validate(valid_loader, device, model, running_metrics_val, val_loss_meter, loss_fn):
for (images_val, labels_val, filename) in tqdm(valid_loader):
images_val = images_val.to(device)
labels_val = labels_val.to(device)
_, _, feat_cls, outs = model.forward(images_val)
outputs = F.interpolate(outs, size=images_val.size()[2:], mode='bilinear', align_corners=True)
val_loss = loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="config")
parser.add_argument(
"--config",
nargs="?",
type=str,
# default="configs/pspnet_cityscapes.yml",
# default="configs/pspnet_gta5.yml",
default='configs/test_from_gta_to_city.yml',
help="Configuration file to use"
)
args = parser.parse_args()
with open(args.config) as fp:
cfg = yaml.load(fp, Loader=yaml.FullLoader)
run_id = random.randint(1, 100000)
# path = cfg['training']['save_path']
logdir = os.path.join('runs', os.path.basename(args.config)[:-4], str(run_id))
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
shutil.copy(args.config, logdir)
logger = get_logger(logdir)
logger.info('Let the games begin')
# train(cfg, writer, logger)
test(cfg, writer, logger)
| 33.376623
| 117
| 0.699222
|
794a53682b9463070b66a7fbdb83641ac799c9a0
| 6,384
|
py
|
Python
|
cs_gan/gan.py
|
kawa-work/deepmind-research
|
8fb75643598f680fdde8d20342b1b82bd2c0abb2
|
[
"Apache-2.0"
] | 10,110
|
2019-08-27T20:05:30.000Z
|
2022-03-31T16:31:56.000Z
|
cs_gan/gan.py
|
ibex-training/deepmind-research
|
6f8ae40b2626b30f5f80dfc92f5676689eff5599
|
[
"Apache-2.0"
] | 317
|
2019-11-09T10:19:10.000Z
|
2022-03-31T00:05:19.000Z
|
cs_gan/gan.py
|
ibex-training/deepmind-research
|
6f8ae40b2626b30f5f80dfc92f5676689eff5599
|
[
"Apache-2.0"
] | 2,170
|
2019-08-28T12:53:36.000Z
|
2022-03-31T13:15:11.000Z
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
import collections
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class GAN(object):
"""Standard generative adversarial network setup.
The aim of the generator is to generate samples which fool a discriminator.
Does not make any assumptions about the discriminator and generator loss
functions.
Trained module components:
* discriminator
* generator
For the standard GAN algorithm, generator_inputs is a vector of noise (either
Gaussian or uniform).
"""
def __init__(self, discriminator, generator,
num_z_iters=None, z_step_size=None,
z_project_method=None, optimisation_cost_weight=None):
"""Constructs the module.
Args:
discriminator: The discriminator network. A sonnet module. See `nets.py`.
generator: The generator network. A sonnet module. For examples, see
`nets.py`.
num_z_iters: an integer, the number of latent optimisation steps.
z_step_size: a float, the latent optimisation step size.
z_project_method: the method for projecting latent after optimisation,
a string from {'norm', 'clip'}.
optimisation_cost_weight: a float, how much to penalise the distance of z
moved by latent optimisation.
"""
self._discriminator = discriminator
self.generator = generator
self.num_z_iters = num_z_iters
self.z_project_method = z_project_method
if z_step_size:
self._log_step_size_module = snt.TrainableVariable(
[],
initializers={'w': tf.constant_initializer(math.log(z_step_size))})
self.z_step_size = tf.exp(self._log_step_size_module())
self._optimisation_cost_weight = optimisation_cost_weight
def connect(self, data, generator_inputs):
"""Connects the components and returns the losses, outputs and debug ops.
Args:
data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on the rank
of this tensor, but it has to be compatible with the shapes expected
by the discriminator.
generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
have to have the same batch size as the `data` tensor. There are no
constraints on the rank of this tensor, but it has to be compatible
with the shapes the generator network supports as inputs.
Returns:
An `ModelOutputs` instance.
"""
samples, optimised_z = utils.optimise_and_sample(
generator_inputs, self, data, is_training=True)
optimisation_cost = utils.get_optimisation_cost(generator_inputs,
optimised_z)
# Pass in the labels to the discriminator in case we are using a
# discriminator which makes use of labels. The labels can be None.
disc_data_logits = self._discriminator(data)
disc_sample_logits = self._discriminator(samples)
disc_data_loss = utils.cross_entropy_loss(
disc_data_logits,
tf.ones(tf.shape(disc_data_logits[:, 0]), dtype=tf.int32))
disc_sample_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.zeros(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
disc_loss = disc_data_loss + disc_sample_loss
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
optimization_components = self._build_optimization_components(
discriminator_loss=disc_loss, generator_loss=generator_loss,
optimisation_cost=optimisation_cost)
debug_ops = {}
debug_ops['disc_data_loss'] = disc_data_loss
debug_ops['disc_sample_loss'] = disc_sample_loss
debug_ops['disc_loss'] = disc_loss
debug_ops['gen_loss'] = generator_loss
debug_ops['opt_cost'] = optimisation_cost
if hasattr(self, 'z_step_size'):
debug_ops['z_step_size'] = self.z_step_size
return utils.ModelOutputs(
optimization_components, debug_ops)
def gen_loss_fn(self, data, samples):
"""Generator loss as latent optimisation's error function."""
del data
disc_sample_logits = self._discriminator(samples)
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
return generator_loss
def _build_optimization_components(
self, generator_loss=None, discriminator_loss=None,
optimisation_cost=None):
"""Create the optimization components for this module."""
discriminator_vars = _get_and_check_variables(self._discriminator)
generator_vars = _get_and_check_variables(self.generator)
if hasattr(self, '_log_step_size_module'):
step_vars = _get_and_check_variables(self._log_step_size_module)
generator_vars += step_vars
optimization_components = collections.OrderedDict()
optimization_components['disc'] = utils.OptimizationComponent(
discriminator_loss, discriminator_vars)
if self._optimisation_cost_weight:
generator_loss += self._optimisation_cost_weight * optimisation_cost
optimization_components['gen'] = utils.OptimizationComponent(
generator_loss, generator_vars)
return optimization_components
def get_variables(self):
disc_vars = _get_and_check_variables(self._discriminator)
gen_vars = _get_and_check_variables(self.generator)
return disc_vars, gen_vars
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
| 37.775148
| 79
| 0.724937
|
794a538c401821ebe9ede9ba71c220885645a40a
| 2,058
|
py
|
Python
|
core/urls.py
|
dakinwerneburg/gradify
|
276f20ba2830918eac13cb5cafe7261cd1d21e70
|
[
"Apache-2.0"
] | null | null | null |
core/urls.py
|
dakinwerneburg/gradify
|
276f20ba2830918eac13cb5cafe7261cd1d21e70
|
[
"Apache-2.0"
] | 6
|
2021-01-15T20:59:11.000Z
|
2022-02-10T11:51:17.000Z
|
core/urls.py
|
dakinwerneburg/gradify
|
276f20ba2830918eac13cb5cafe7261cd1d21e70
|
[
"Apache-2.0"
] | null | null | null |
"""
URLs file for core Gradify app.
"""
from django.urls import path, re_path
from django.views.generic import TemplateView
from . import views
urlpatterns = [
path('', TemplateView.as_view(template_name="core/index.html"), name='home'),
path('import/', views.gc_ingest_and_redirect, name='gc-import'),
path('export/', views.export_csv_list_view, name='course-export'),
# Course routes
path('course/', views.CoursesView.as_view(), name='course-list'),
path('course/create/', views.CourseCreateView.as_view(), name='course-create'),
path('course/<int:pk>/', views.CourseDetailView.as_view(), name='course-detail'),
path('course/<int:pk>/delete/', views.CourseDeleteView.as_view(), name="course-delete"),
path('course/<int:pk>/gradebook/', views.StudentSubmissionsView.as_view(),
name='studentsubmission-list'),
path('course/<int:pk>/roster/', views.CourseRosterView.as_view(), name='course-roster'),
path('course/<int:pk>/assignment/', views.CourseWorkListView.as_view(), name='coursework-list'),
path('course/<int:pk>/assignment/<int:pk2>/', views.CourseWorkDetailView.as_view(),
name='coursework-detail'),
path('course/<int:pk>/assignment/<int:pk2>/update', views.CourseWorkUpdateView.as_view(),
name='coursework-update'),
# Assignment routes
path('assignment/create/', views.CourseWorkCreateView.as_view(), name='coursework-create'),
path('assignment/delete', views.CourseWorkDeleteView.as_view(), name='coursework-delete'),
# Verification routes
path('googleb95a6feb416ee79e.html', views.google_verification, name='google-verification'),
re_path(r'^.well-known/acme-challenge/.*$', views.acme_challenge, name='acme-challenge'),
# Gradebook change routes
path('gradebook/studentsubmission/<int:pk>/update/',
views.StudentSubmissionUpdateView.as_view(), name='studentsubmission-update'),
path('gradebook/<int:pk>/studentsubmission/create/', views.StudentSubmissionCreateView.as_view(),
name='studentsubmission-create'),
]
| 54.157895
| 101
| 0.714286
|
794a5457b17658533b879b25f75f819af0888606
| 463
|
py
|
Python
|
solving_equations.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
solving_equations.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
solving_equations.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
x=torch.Tensor([100.])
# wrap the tensor as a Variable so gradients are tracked: tensor([100.], requires_grad=True)
x=Variable(x,requires_grad=True)
print('grad',x.grad,'data',x.data)
learning_rate=0.01
epochs=5000
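# gradient descent on y = x**2: each step applies x <- x - lr * dy/dx = x - 2*lr*x,
# so x decays geometrically toward the minimum at 0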
for epoch in range(epochs):
y = x**2
y.backward()
print('grad',x.grad.data)
x.data=x.data-learning_rate*x.grad.data
# in PyTorch gradients accumulate if they are not zeroed promptly
x.grad.data.zero_()
print(x.data)
print(y)
| 24.368421
| 45
| 0.641469
|
794a54f001b7720bc5abaf17b0998cb1e6058405
| 9,576
|
py
|
Python
|
xen/xen-4.2.2/tools/python/xen/xend/XendDSCSI.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2018-02-02T00:15:26.000Z
|
2018-02-02T00:15:26.000Z
|
xen/xen-4.2.2/tools/python/xen/xend/XendDSCSI.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | null | null | null |
xen/xen-4.2.2/tools/python/xen/xend/XendDSCSI.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2019-05-27T09:47:18.000Z
|
2019-05-27T09:47:18.000Z
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright FUJITSU LIMITED 2008
# Masaki Kanno <kanno.masaki@jp.fujitsu.com>
#============================================================================
from xen.xend.XendBase import XendBase
from xen.xend.XendPSCSI import XendPSCSI
from xen.xend import XendAPIStore
from xen.xend import sxp
from xen.xend import uuid as genuuid
import XendDomain, XendNode
from XendError import *
from XendTask import XendTask
from XendLogging import log
class XendDSCSI(XendBase):
"""Representation of a half-virtualized SCSI device."""
def getClass(self):
return "DSCSI"
def getAttrRO(self):
attrRO = ['VM',
'PSCSI',
'HBA',
'virtual_host',
'virtual_channel',
'virtual_target',
'virtual_lun',
'virtual_HCTL',
'runtime_properties']
return XendBase.getAttrRO() + attrRO
def getAttrRW(self):
attrRW = []
return XendBase.getAttrRW() + attrRW
def getAttrInst(self):
attrInst = ['VM',
'PSCSI',
'HBA',
'virtual_HCTL']
return XendBase.getAttrInst() + attrInst
def getMethods(self):
methods = ['destroy']
return XendBase.getMethods() + methods
def getFuncs(self):
funcs = ['create']
return XendBase.getFuncs() + funcs
getClass = classmethod(getClass)
getAttrRO = classmethod(getAttrRO)
getAttrRW = classmethod(getAttrRW)
getAttrInst = classmethod(getAttrInst)
getMethods = classmethod(getMethods)
getFuncs = classmethod(getFuncs)
def create(self, dscsi_struct):
# Check if VM is valid
xendom = XendDomain.instance()
if not xendom.is_valid_vm(dscsi_struct['VM']):
raise InvalidHandleError('VM', dscsi_struct['VM'])
dom = xendom.get_vm_by_uuid(dscsi_struct['VM'])
# Check if PSCSI is valid
xennode = XendNode.instance()
pscsi_uuid = xennode.get_pscsi_by_uuid(dscsi_struct['PSCSI'])
if not pscsi_uuid:
raise InvalidHandleError('PSCSI', dscsi_struct['PSCSI'])
# Assign PSCSI to VM
try:
dscsi_ref = XendTask.log_progress(0, 100, \
dom.create_dscsi, \
dscsi_struct)
except XendError, e:
log.exception("Error in create_dscsi")
raise
return dscsi_ref
create = classmethod(create)
def get_by_VM(cls, VM_ref):
result = []
for dscsi in XendAPIStore.get_all("DSCSI"):
if dscsi.get_VM() == VM_ref:
result.append(dscsi.get_uuid())
return result
get_by_VM = classmethod(get_by_VM)
def __init__(self, uuid, record):
XendBase.__init__(self, uuid, record)
v_hctl = self.virtual_HCTL.split(':')
self.virtual_host = int(v_hctl[0])
self.virtual_channel = int(v_hctl[1])
self.virtual_target = int(v_hctl[2])
self.virtual_lun = int(v_hctl[3])
def get_VM(self):
return self.VM
def get_PSCSI(self):
return self.PSCSI
def get_HBA(self):
return self.HBA
def get_virtual_host(self):
return self.virtual_host
def get_virtual_channel(self):
return self.virtual_channel
def get_virtual_target(self):
return self.virtual_target
def get_virtual_lun(self):
return self.virtual_lun
def get_virtual_HCTL(self):
return self.virtual_HCTL
def get_runtime_properties(self):
xendom = XendDomain.instance()
dominfo = xendom.get_vm_by_uuid(self.VM)
try:
device_dict = {}
for device_sxp in dominfo.getDeviceSxprs('vscsi'):
target_dev = None
for dev in device_sxp[1][0][1]:
vdev = sxp.child_value(dev, 'v-dev')
if vdev == self.virtual_HCTL:
target_dev = dev
break
if target_dev is None:
continue
dev_dict = {}
for info in target_dev[1:]:
dev_dict[info[0]] = info[1]
device_dict['dev'] = dev_dict
for info in device_sxp[1][1:]:
device_dict[info[0]] = info[1]
return device_dict
except Exception, exn:
log.exception(exn)
return {}
def destroy(self):
xendom = XendDomain.instance()
dom = xendom.get_vm_by_uuid(self.get_VM())
if not dom:
raise InvalidHandleError("VM", self.get_VM())
XendTask.log_progress(0, 100, \
dom.destroy_dscsi, \
self.get_uuid())
class XendDSCSI_HBA(XendBase):
"""Representation of a half-virtualized SCSI HBA."""
def getClass(self):
return "DSCSI_HBA"
def getAttrRO(self):
attrRO = ['VM',
'PSCSI_HBAs',
'DSCSIs',
'virtual_host',
'assignment_mode']
return XendBase.getAttrRO() + attrRO
def getAttrRW(self):
attrRW = []
return XendBase.getAttrRW() + attrRW
def getAttrInst(self):
attrInst = ['VM',
'virtual_host',
'assignment_mode']
return XendBase.getAttrInst() + attrInst
def getMethods(self):
methods = ['destroy']
return XendBase.getMethods() + methods
def getFuncs(self):
funcs = ['create']
return XendBase.getFuncs() + funcs
getClass = classmethod(getClass)
getAttrRO = classmethod(getAttrRO)
getAttrRW = classmethod(getAttrRW)
getAttrInst = classmethod(getAttrInst)
getMethods = classmethod(getMethods)
getFuncs = classmethod(getFuncs)
def create(self, dscsi_HBA_struct):
# Check if VM is valid
xendom = XendDomain.instance()
if not xendom.is_valid_vm(dscsi_HBA_struct['VM']):
raise InvalidHandleError('VM', dscsi_HBA_struct['VM'])
dom = xendom.get_vm_by_uuid(dscsi_HBA_struct['VM'])
# Check if PSCSI_HBA is valid
xennode = XendNode.instance()
pscsi_HBA_uuid = xennode.get_pscsi_HBA_by_uuid(dscsi_HBA_struct['PSCSI_HBA'])
if not pscsi_HBA_uuid:
raise InvalidHandleError('PSCSI_HBA', dscsi_HBA_struct['PSCSI_HBA'])
# Assign PSCSI_HBA and PSCSIs to VM
try:
dscsi_HBA_ref = XendTask.log_progress(0, 100, \
dom.create_dscsi_HBA, \
dscsi_HBA_struct)
except XendError, e:
log.exception("Error in create_dscsi_HBA")
raise
return dscsi_HBA_ref
create = classmethod(create)
def get_by_VM(cls, VM_ref):
result = []
for dscsi_HBA in XendAPIStore.get_all("DSCSI_HBA"):
if dscsi_HBA.get_VM() == VM_ref:
result.append(dscsi_HBA.get_uuid())
return result
get_by_VM = classmethod(get_by_VM)
def __init__(self, uuid, record):
XendBase.__init__(self, uuid, record)
self.virtual_host = record['virtual_host']
self.assignment_mode = record['assignment_mode']
def get_VM(self):
return self.VM
def get_PSCSI_HBAs(self):
PSCSIs = []
uuid = self.get_uuid()
for dscsi in XendAPIStore.get_all('DSCSI'):
if dscsi.get_VM() == self.VM and dscsi.get_HBA() == uuid:
PSCSIs.append(dscsi.get_PSCSI())
PSCSI_HBAs = []
for pscsi_uuid in PSCSIs:
pscsi_HBA_uuid = XendAPIStore.get(pscsi_uuid, 'PSCSI').get_HBA()
if not pscsi_HBA_uuid in PSCSI_HBAs:
PSCSI_HBAs.append(pscsi_HBA_uuid)
return PSCSI_HBAs
def get_DSCSIs(self):
DSCSIs = []
uuid = self.get_uuid()
for dscsi in XendAPIStore.get_all('DSCSI'):
if dscsi.get_VM() == self.VM and dscsi.get_HBA() == uuid:
DSCSIs.append(dscsi.get_uuid())
return DSCSIs
def get_virtual_host(self):
return self.virtual_host
def get_assignment_mode(self):
return self.assignment_mode
def destroy(self):
xendom = XendDomain.instance()
dom = xendom.get_vm_by_uuid(self.get_VM())
if not dom:
raise InvalidHandleError("VM", self.get_VM())
XendTask.log_progress(0, 100, \
dom.destroy_dscsi_HBA, \
self.get_uuid())
| 31.92
| 85
| 0.56934
|
794a54ffa64a4aa56fa5fc7c59cb5e23fd4ceadb
| 1,295
|
py
|
Python
|
Configuration/broadcast_sendfiles.py
|
adrien-bellaiche/Interceptor
|
ff6c9674141082b55a711df67a625759304a9b1b
|
[
"Apache-2.0"
] | null | null | null |
Configuration/broadcast_sendfiles.py
|
adrien-bellaiche/Interceptor
|
ff6c9674141082b55a711df67a625759304a9b1b
|
[
"Apache-2.0"
] | null | null | null |
Configuration/broadcast_sendfiles.py
|
adrien-bellaiche/Interceptor
|
ff6c9674141082b55a711df67a625759304a9b1b
|
[
"Apache-2.0"
] | null | null | null |
import sys
import pexpect
from Interceptor.JogCommand.Utils import make_mission_file
nArgs = len(sys.argv)-1
if nArgs != 1:
print "enter parameters : jogNumber"
else:
files_paths = file("broadcast_sendfiles", 'r').readlines()
ids = [int(_) for _ in file("broacast_init_config", 'r').readline().split() if _.isdigit()]
config_file_data = file("jogs.conf", 'r').readlines() # TODO : fichier jogs.conf
for stnum in ids:
ipaddr = "172.20.25.%s" % stnum
passwd = "root%s" % stnum
make_mission_file(config_file_data[stnum])
for file_path in files_paths:
file_path_sep = file_path.split()
            src = file_path.strip()  # strip the trailing newline left by readlines()
dest = "/root/Interceptor"
if len(file_path_sep) > 1:
dest = "/".join(["/root/Interceptor", file_path_sep[0:-1:1]])
cmd = "scp %s root@%s:%s/" % (src, ipaddr, dest)
child1 = pexpect.spawn(cmd)
# child1.expect(["password:","pass","word:",":","Password:",pexpect.EOF, pexpect.TIMEOUT])
child1.expect("password:") # TODO : May fail here
# child1.expect(pexpect.EOF)
            child1.sendline(passwd + '\r')  # an explicit trailing CR seems to be required for the password prompt
child1.expect(pexpect.EOF)
print "jog%s OK" % stnum
| 39.242424
| 102
| 0.59305
|
794a557289a3afd52ee68905fb637bdaf6bf7c97
| 3,768
|
py
|
Python
|
app/recipe/views.py
|
JirkaFait/recepty-app-api
|
4a5f85ff58ec52190692f761ea2dcd4b255f4f4c
|
[
"MIT"
] | null | null | null |
app/recipe/views.py
|
JirkaFait/recepty-app-api
|
4a5f85ff58ec52190692f761ea2dcd4b255f4f4c
|
[
"MIT"
] | null | null | null |
app/recipe/views.py
|
JirkaFait/recepty-app-api
|
4a5f85ff58ec52190692f761ea2dcd4b255f4f4c
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import action
from rest_framework.response import Response
from core.models import Tag, Ingredient, Recipe, Units
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base viewset for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for current user"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-name').distinct()
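    # Usage sketch (the URL prefix is an assumption for illustration):
    #   GET /api/recipe/tags/?assigned_only=1
    # returns only this user's tags that are attached to at least one recipe.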
def perform_create(self, serializer):
"""Create a new ingredient"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class UnitsViewSet(BaseRecipeAttrViewSet):
"""Manage units in the database"""
queryset = Units.objects.all()
serializer_class = serializers.UnitsSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
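        # e.g. _params_to_ints('1,2,3') -> [1, 2, 3]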
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
| 33.642857
| 74
| 0.669055
|
794a55c4b2bdac9be4f7238959c7e6fa16eb0947
| 12,194
|
py
|
Python
|
bgboard.py
|
zacharytower/Backgammon
|
085b64bcbd220199d799832696422a50c2c0c2dc
|
[
"Apache-2.0"
] | null | null | null |
bgboard.py
|
zacharytower/Backgammon
|
085b64bcbd220199d799832696422a50c2c0c2dc
|
[
"Apache-2.0"
] | null | null | null |
bgboard.py
|
zacharytower/Backgammon
|
085b64bcbd220199d799832696422a50c2c0c2dc
|
[
"Apache-2.0"
] | null | null | null |
import bgspace, pygame, time
from global_vars import *
class BGBoard(object):
'''
The BGBoard Class is the object that contains the game state. That is, the
position of each piece on the board.
This class deals with drawing the board onto DISPLAYSURF and processing BGMove requests.
    This class can also reset the board, draw pieces that are moved off, and display the win animation.
Furthermore, this object will also handle click positions and return data about the item clicked.
'''
def __init__(self,colorScheme):
'''
        initializes the object. The only data needed is the color scheme, a dictionary of colors:
{pieceColorA, pieceColorB, dieColorA, dieColorB, backgroundColor, spaceColorA, spaceColorB}
'''
self.pieceColorA = colorScheme['pieceColorA']
self.pieceColorB = colorScheme['pieceColorB']
self.dieColorA = colorScheme['dieColorA']['cube']
self.pipColorA = colorScheme['dieColorA']['pip']
self.dieColorB = colorScheme['dieColorB']['cube']
self.pipColorB = colorScheme['dieColorB']['pip']
self.spaceColorA = colorScheme['spaceColorA']
self.spaceColorB = colorScheme['spaceColorB']
self.backgroundColor = colorScheme['backgroundColor']
self.boarderColor = colorScheme['boarderColor']
self.messageBoxColor = colorScheme['messageBoxColor']
self.messageTextColor = colorScheme['messageTextColor']
self.holderColor = colorScheme['holderColor']
self.chipBoarderColor = colorScheme['chipBoarderColor']
self.chipsOnBar = []
# set default board
a,b = self.pieceColorA, self.pieceColorB
self.defaultBoardValues = {0: {'player': b, 'chips':2}, 5:{'player':a, 'chips':5},7:{'player':a, 'chips':3}, 11:{'player':b,'chips':5},
12:{'player':a,'chips':5},16:{'player':b,'chips':3},18:{'player':b,'chips':5},23:{'player':a,'chips':2}}
self.movedOff = {self.pieceColorA:0, self.pieceColorB:0}
self.message = ''
self.diceRolled = False
self.diceRolledColor = None
self.rollOff = True
self.rollA, self.rollB = None,None
self.resetBoard()
def __getitem__(self,key):
return self.spaceList[key]
def getClickedSpace(self,clickPos, reduced = False):
''' returns the index of the clicked space.
        If the click did not hit any space, -1 is returned.
If reduced is set to true, then the space rectangle will be reduced to the rectangle
surrounding the pieces. (see BGSpace.entireRectangle() vs. BGSpace.reducedRectangle())'''
for space in self.spaceList:
if reduced == False:
r = space.entireRectangle()
else:
r = space.reducedRectangle()
#pygame.draw.rect(DISPLAYSURF,(0,0,0),space.reducedRectangle())
#pygame.display.update()
#time.sleep(1)
#raw_input()
if r.collidepoint(clickPos): # space was clicked:
return space.index
for i,owner in enumerate(self.chipsOnBar):
            x, y = (375, 325 + 50 * i * (-1 if i % 2 != 0 else 1))  # alternate chips above/below the bar centre; must match displayBoard
if (clickPos[0] - x) ** 2 + (clickPos[1] - y) ** 2 <= (25) ** 2:
return 'bar'
for y in [50,450]:
if pygame.Rect(700,y,125,150).collidepoint(clickPos): return 'offboard'
return -1
def resetBoard(self):
self.spaceList = []
for x in range(24):
try:
owner, quantity = self.defaultBoardValues[x]['player'], self.defaultBoardValues[x]['chips']
except KeyError:
owner, quantity = None, 0
self.spaceList.append(bgspace.BGSpace(x, self.spaceColorB if x % 2 == 0 else self.spaceColorA, owner,quantity))
def displayBoard(self):
''' draws the board to DISPLAYSURF. Draws all of the spaces as well as other pieces of the board.'''
# draw background rectangles
for x in [50,400]:
# rectangle with a x value of (x), y value of 50, width of 300, and height of 550.
pygame.draw.rect(DISPLAYSURF, self.backgroundColor, (x,50,300,550))
'''
# draw each space
for space in self.spaceList:
space.drawToBoard()'''
# draw outline perimeter
        # first, let's draw the board borders and the bar.
        # [border,
        #  bar,
        #  separates (C,D) from chip holders,
        #  separates chip holders from message box]
        rectTuples = [(0,0,925,50), (0,0,50,650), (0,600,925,50), (875,0,50,650),
                      (350,0,50,650),
                      (700,0,50,650),
                      (750,200,125,25),
                      (750,425,125,25)]
        for rectTuple in rectTuples:
            pygame.draw.rect(DISPLAYSURF, self.boarderColor, rectTuple)
# draw chip holders
for y in [50,450]:
pygame.draw.rect(DISPLAYSURF, self.holderColor,(750,y,125,150))
# draw chips that are moved off.
for i in range(self.movedOff[self.pieceColorA]):
            # row index per chip, five chips per row (the exact holder layout is an assumption)
            ys = [f // 5 for f in range(15)]
            rt = (700 + (5 * i) % 30, 50 + ys[i] * 55, 25, 50)
pygame.draw.rect(DISPLAYSURF, self.pieceColorA, rt )
            # draw a border around the chip (width goes as a separate argument, not inside the rect tuple).
            pygame.draw.rect(DISPLAYSURF, self.chipBoarderColor, rt, 5)
# draws the chips that are on the bar.
if self.chipsOnBar != []:
# the chips on the bar are only expressed by their owner
for i, owner in enumerate(self.chipsOnBar):
                pygame.draw.circle(DISPLAYSURF, owner, (375, 325 + 50 * i * (-1 if i % 2 != 0 else 1)), 25)  # must match getClickedSpace
# draws the message box as well as the message.
pygame.draw.rect(DISPLAYSURF, self.messageBoxColor, (750,225,125,200))
self.displayText((762,325))
# show rolled dice.
if self.diceRolled == True: # dice are rolled:
if self.rollOff == True:
xTup = (150,550)
color = 'A'
elif self.diceRolledColor == self.pieceColorA: # if player A rolled
xTup = (450,550)
color = 'A'
else: # B rolled
xTup = (150,250)
color = 'B'
        cube, pip = getattr(self, 'dieColor' + color), getattr(self, 'pipColor' + color)  # sets cube and pip to their respective colors.
if self.rollOff == True:
colorAlt = 'A' if color == 'B' else 'B'
            cubeAlt, pipAlt = getattr(self, 'dieColor' + colorAlt), getattr(self, 'pipColor' + colorAlt)
for i,x in enumerate(xTup):
if self.rollOff == True:
colorSequence = ((cube,pip),(cubeAlt,pipAlt))
else:
colorSequence = ((cube,pip),(cube,pip))
pygame.draw.rect(DISPLAYSURF,colorSequence[i][0],(x,300,50,50)) # draw cube rectangle to board
            # in the case of a roll off, rollA is the roll of player A and rollB is the roll of player B.
if x in [450,150]:
roll = self.rollA
else:
roll = self.rollB
o = {1:((x+25,325,10),),2:((x+35,315,5),(x+15,335,5))}
p = {3:o[2] + ((x+25,325,5),), 4: tuple([(x + m,300 + n,5) for m in [15,35] for n in [15,35]])}
q = {5:(p[4]+ p[3]), 6:(tuple([(x+m,300+n,5) for m in [15,35] for n in [10,25,40]]))}
rollDict = merge_two_dicts(o,p); rollDict = merge_two_dicts(rollDict,q)
for c in rollDict[roll]:
pygame.draw.circle(DISPLAYSURF,colorSequence[i][1],c[:2],c[2])
for space in self.spaceList:
space.drawToBoard()
#pygame.display.update()
def displayText(self, pos, textSize = 20):
'''
displays string 'text' at position 'pos'
you may also define the text size.
'''
# DroidSerif = /usr/share/fonts/truetype/droid/DroidSerif-Bold.ttf
fontObj = pygame.font.Font('freesansbold.ttf',textSize)
textSurfaceObj = fontObj.render(self.message, True, self.messageTextColor)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = (pos)
DISPLAYSURF.blit(textSurfaceObj, textRectObj)
def addChipToBar(self, color):
self.chipsOnBar.append(color)
def removeChipFromBar(self, color):
self.chipsOnBar.remove(color)
def makeMove(self, move):
''' makes the move and edits the board state.
Returns 0 if the move was valid. Returns -1 if move was invalid.'''
if move.inverse == False: # move is not an inverse move:
if self.isValidMove(move) == False: return -1
hit = False
if self.spaceList[move.toWhere].spaceOwner != move.color and self.spaceList[move.toWhere].howManyChips == 1 and self.spaceList[move.toWhere].spaceOwner != None: # hit the opponent
hit = True
self.spaceList[move.toWhere].spaceOwner = move.color
self.spaceList[move.toWhere].howManyChips = 1
hitColor = self.pieceColorA if move.color == self.pieceColorB else self.pieceColorB
self.addChipToBar(hitColor)
#return 0
if move.fromWhere == 'offboard':
self.removeFromSideColumn(move.color)
elif move.toWhere == 'offboard':
self.addToSideColumn(move.color)
if type(move.fromWhere) != str:
try:
self.spaceList[move.fromWhere].howManyChips -= 1
if self.spaceList[move.fromWhere].howManyChips == 0: # no one is on the space
self.spaceList[move.fromWhere].spaceOwner = None
except IndexError: # space was bogus
pass
else:
if move.fromWhere == 'bar':
self.removeChipFromBar(move.color)
if type(move.toWhere) != str:
#print move.toWhere
if hit == False:
self.spaceList[move.toWhere].howManyChips += 1
self.spaceList[move.toWhere].spaceOwner = move.color
return 0
def addToSideColumn(self,color):
''' adds a piece to the side column. If the side column reaches 15, then that player wins!'''
self.movedOff[color] += 1
def removeFromSideColumn(self, color):
self.movedOff[color] -= 1
def hasWon(self,color):
''' returns if the color has won'''
return self.movedOff[color] == 15
def isValidMove(self,move):
        ''' returns True if the move is valid given the current board.'''
assert type(move.roll) == int, 'Roll not passed as integer.'
# make sure the owner is moving existing pieces
# make sure the owner is not moving pieces outside of the barriers of the board.
if type(move.toWhere) != str:
if (0 <= move.toWhere < 24) == False:
return False
if type(move.fromWhere) != str:
if (0 <= move.fromWhere < 24) == False: return False
if self.spaceList[move.fromWhere].howManyChips == 0: # no chips on requested space
return False
# make sure that the owner is moving pieces that belong to him.
if move.color != self.spaceList[move.fromWhere].spaceOwner:
return False
else:
if move.color not in self.chipsOnBar: return False
# make sure the owner is not moving onto a spot owned by the opponent
if move.toWhere != 'offboard':
if self.spaceList[move.toWhere].howManyChips >= 2 and self.spaceList[move.toWhere].spaceOwner != move.color:
return False
# make sure pieces are not on the bar while move is being made
if move.fromWhere != 'bar' and move.color in self.chipsOnBar:
return False
# make sure roll is consistent with the move made.
if type(move.fromWhere) == str or type(move.toWhere) == str: # moving off the bar
if type(move.fromWhere) == str:
x = move.toWhere
else:
x = move.fromWhere
if move.color == self.pieceColorA:
moveDistance = 24 - x
elif move.color == self.pieceColorB:
moveDistance = x + 1
        elif type(move.fromWhere) != str and type(move.toWhere) != str:
moveDistance = abs(move.fromWhere - move.toWhere)
if moveDistance != move.roll:
return False
# make sure player already hasn't used that roll yet.
if move.ignoreRollDict == False:
try:
if move.rollDict[moveDistance] == True: # move already used
return False
except KeyError:
return False
# make sure player has all of his chips in home base before moving chips off
if move.toWhere == 'offboard':
homeBase = range(6) if move.color == self.pieceColorA else range(18,24)
for space in self.spaceList:
if space.index not in homeBase and space.spaceOwner == move.color:
return False
# make sure if player is moving off the bar that the player is moving into the other player's home base. (and not anywhere else)
if move.fromWhere == 'bar':
if move.color == self.pieceColorA and move.toWhere not in range(18, 23 + 1): return False
if move.color == self.pieceColorB and move.toWhere not in range(5+1): return False
elif move.toWhere != 'offboard': # make sure player is moving in the right direction.
if move.color == self.pieceColorA and move.fromWhere <= move.toWhere:
return False
if move.color == self.pieceColorB and move.fromWhere >= move.toWhere:
return False
return True
def __repr__(self):
return str([x for x in self.spaceList])
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
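# Quick illustration of merge_two_dicts:
#   merge_two_dicts({'a': 1}, {'a': 2, 'b': 3}) -> {'a': 2, 'b': 3}
# (values from y win on key collisions, since z.update(y) runs last)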
| 28.292343
| 181
| 0.678038
|
794a570eaf15ca26efc54c173d2eeaed507a0476
| 12,395
|
py
|
Python
|
cvap/module/encoder/clip_head.py
|
zhaoyanpeng/lvamodel
|
93b06ff43ae6a76323cecea4c10cf457945c2711
|
[
"MIT"
] | 6
|
2021-12-20T06:01:56.000Z
|
2022-03-25T06:44:50.000Z
|
cvap/module/encoder/clip_head.py
|
zhaoyanpeng/vipant
|
93b06ff43ae6a76323cecea4c10cf457945c2711
|
[
"MIT"
] | null | null | null |
cvap/module/encoder/clip_head.py
|
zhaoyanpeng/vipant
|
93b06ff43ae6a76323cecea4c10cf457945c2711
|
[
"MIT"
] | null | null | null |
from fvcore.common.registry import Registry
from omegaconf.listconfig import ListConfig
from collections import OrderedDict
import re
import math
import copy
import threading
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .. import (
build_encoder_module, interp_clip_vp_embedding, interp_conv_weight_spatial
)
from .audio_head import position_resolution, load_pos_embedding
""" The idea is to abstract an encoding head as a four-layer encoder.
(1) backbone encoder (most likely to be shared)
(2-3) modality-specific pre- / post-encoding layer
(4) class / positional embedding (likely to be shared)
"""
class MetaHead(nn.Module):
def __init__(self, cfg, **kwargs):
super().__init__()
keep_hp = kwargs.pop("keep_hp", False)
reference = kwargs.pop("reference", None)
shared_modules = kwargs.pop("shared_modules", [])
kwargs.update({
"width": cfg.width, "embed_dim": cfg.embed_dim,
"ctx_len": cfg.ctx_len, "resolution": cfg.resolution
}) # shared hyperparameters
self.encoder = (
build_encoder_module(cfg.encoder, **kwargs)
#if "encoder" not in shared_modules else reference.encoder
) # backbone
self.pre_encoder = (
build_encoder_module(cfg.pre_encoder, **kwargs)
#if "pre_encoder" not in shared_modules else reference.pre_encoder
)
self.post_encoder = (
build_encoder_module(cfg.post_encoder, **kwargs)
#if "post_encoder" not in shared_modules else reference.post_encoder
)
self.pre_encoder_addon = build_encoder_module(
cfg.pre_encoder_addon, **kwargs
) # in-between `pre_encoder` & `encoder`
self.post_encoder_addon = build_encoder_module(
cfg.post_encoder_addon, **kwargs
) # in-between `encoder` & `post_encoder`
# have to build all modules to get `position_resolution`, even though
# we will probably replace all the modules by those of the `reference`
position_resolution = (
self.pre_encoder.position_resolution or \
self.encoder.position_resolution or \
self.post_encoder.position_resolution
)
kwargs.update({
"position_resolution": position_resolution
})
self.misc = build_encoder_module(cfg.misc, **kwargs)
# time to share modules
#self.replace_modules(shared_modules, reference, keep_hp=keep_hp)
def replace_modules(self, shared_modules=[], reference=None, keep_hp=False, **kwargs):
""" keep_hp: keep selected hyperparameters
"""
if len(shared_modules) < 1 or reference is None:
return []
module_list = ["encoder", "pre_encoder", "post_encoder", "misc"]
ref_modules = list()
for module in module_list:
if module not in shared_modules:
continue
ref_modules.append(module)
self_module = eval(f"self.{module}")
refr_module = eval(f"reference.{module}")
#print(f"RP A {module} {self_module.hp} {refr_module.hp} {self_module == refr_module}")
if hasattr(self_module, "replace_modules"):
self_module.replace_modules(refr_module, keep_hp=keep_hp)
new_self_module = eval(f"self.{module}")
#print(f"RP B {module} {self_module.hp} {refr_module.hp} {self_module == refr_module} {new_self_module == refr_module}")
else: # via reference, not recommended
hp = self_module.hp
exec(f"self.{module} = reference.{module}") # modified via reference
if keep_hp:
exec(f"self.{module}.hp = {hp}") # so the `reference` is modified
new_self_module = eval(f"self.{module}")
#print(f"RP C {module} {self_module.hp} {refr_module.hp} {self_module == refr_module} {new_self_module == refr_module}")
return ref_modules
def forward(self, x: torch.Tensor, *args, **kwargs):
kwargs.update({
"positional_embedding": self.misc.pos_embedding,
"class_embedding": self.misc.cls_embedding,
"position_resolution": self.misc.position_resolution
})
x = self.pre_encoder(x, **kwargs) # (N, L, D)
x = self.pre_encoder_addon(x, **kwargs) # (N, L, D)
# TODO assumed 3d `x`
x = x.permute(1, 0, 2) if not self.encoder.batch_first else x # (N, L, D) -> (L, N, D)
x = self.encoder(x, **kwargs)
x = x.permute(1, 0, 2) if not self.encoder.batch_first else x # (L, N, D) -> (N, L, D)
        mask = self.pre_encoder.mask  # or self.encoder.mask; text: position of the cls token; audio/image: ?
x = self.post_encoder_addon(x, **kwargs)
x = self.post_encoder(x, mask=mask, **kwargs)
if kwargs.get("normalized", False):
x = x / x.norm(dim=-1, keepdim=True)
#print(f"{threading.current_thread().ident} x --{kwargs.get('normalized', False)}")
return x
class CLIPImageHead(MetaHead):
def __init__(self, cfg, **kwargs):
super().__init__(cfg, **kwargs)
def copy_state_dict(self, state_dict):
if not self.encoder.batch_first: # TransformerBackbone
pre_keys = {"conv1.weight"}
post_keys = {"proj"}
misc_keys = {"positional_embedding", "class_embedding"}
old_dict = OrderedDict()
for k, v in state_dict.items():
if k in pre_keys:
k = f"pre_encoder.{k}"
elif k in post_keys:
k = f"post_encoder.{k}"
elif k in misc_keys:
k = f"misc.{k}"
else:
#k = re.sub("^ln_\w+\.", "ln.", k)
k = re.sub("^transformer\.", "encoder.", k)
k = re.sub("^ln_pre\.", "pre_encoder.ln.", k)
k = re.sub("^ln_post\.", "post_encoder.ln.", k)
old_dict[k] = v
else: # ResNetBackbone
old_dict = OrderedDict()
for k, v in state_dict.items():
if re.match("layer\d+\.", k):
k = f"encoder.{k}"
elif re.match("attnpool\.", k):
k = re.sub("^attnpool\.", "post_encoder.", k)
else:
k = f"pre_encoder.{k}"
old_dict[k] = v
pos_key = "post_encoder.positional_embedding"
new_key = "misc." + pos_key.rsplit(".")[-1]
old_dict[new_key] = old_dict.pop(pos_key)
new_dict = self.state_dict()
new_keys = set(new_dict.keys())
old_keys = set(old_dict.keys())
new_dict.update(old_dict)
self.load_state_dict(new_dict)
n_o = new_keys - old_keys
o_n = old_keys - new_keys
#print(f"{n_o}\n{o_n}")
return n_o, o_n
class CLIPAudioHead(MetaHead):
def __init__(self, cfg, **kwargs):
super().__init__(cfg, **kwargs)
def from_pretrained(self, state_dict, cfg, *args, **kwargs):
excluded = ["misc.positional_embedding"]
new_dict = self.state_dict()
old_dict = {k: v for k, v in state_dict.items() if k not in excluded}
# interpolate positional embedding
key = "misc.positional_embedding"
new_pos_shape = self.misc.position_resolution
old_pos_shape = position_resolution(
cfg.model.audio.resolution, cfg.model.audio.pre_encoder.patch_size, cfg.model.audio.pre_encoder.stride
        )  # nrow always indicates the time dimension
#print(new_dict[key].shape, state_dict[key].shape, new_pos_shape, old_pos_shape)
if state_dict[key].shape[0] in {50, 197}: # from vision encoder TODO could be wrong
state_dict[key] = interp_clip_vp_embedding(
state_dict.pop(key), old_pos_shape
) # pos embed inherited from vision encoder
n_o, o_n = load_pos_embedding(
state_dict, old_dict, new_dict, key, 1, old_pos_shape, new_pos_shape
)
self.load_state_dict(new_dict)
return n_o, o_n
def copy_state_dict(self, state_dict):
if not self.encoder.batch_first: # TransformerBackbone
pre_keys = {"conv1.weight"}
post_keys = {"proj"}
misc_keys = {"positional_embedding", "class_embedding"}
old_dict = OrderedDict()
for k, v in state_dict.items():
if k in pre_keys:
k = f"pre_encoder.{k}"
elif k in post_keys:
k = f"post_encoder.{k}"
elif k in misc_keys:
k = f"misc.{k}"
else:
#k = re.sub("^ln_\w+\.", "ln.", k)
k = re.sub("^transformer\.", "encoder.", k)
k = re.sub("^ln_pre\.", "pre_encoder.ln.", k)
k = re.sub("^ln_post\.", "post_encoder.ln.", k)
old_dict[k] = v
# interpolation
pos_key = "misc.positional_embedding"
old_dict[pos_key] = interp_clip_vp_embedding(
old_dict.pop(pos_key), self.misc.position_resolution
)
else: # ResNetBackbone
old_dict = OrderedDict()
for k, v in state_dict.items():
if re.match("layer\d+\.", k):
k = f"encoder.{k}"
elif re.match("attnpool\.", k):
k = re.sub("^attnpool\.", "post_encoder.", k)
else:
k = f"pre_encoder.{k}"
old_dict[k] = v
# interpolation
pos_key = "post_encoder.positional_embedding"
new_key = "misc." + pos_key.rsplit(".")[-1]
old_dict[new_key] = interp_clip_vp_embedding(
old_dict.pop(pos_key), self.misc.position_resolution
)
# take care of conv1
new_dict = self.state_dict()
conv_key = "pre_encoder.conv1.weight"
conv_weight = interp_conv_weight_spatial(old_dict[conv_key], new_dict[conv_key].shape[-2:])
use_mean = new_dict[conv_key].shape[1] != 1
old_dict[conv_key] = conv_weight if use_mean else conv_weight.mean(1, keepdim=True)
# update
new_keys = set(new_dict.keys())
old_keys = set(old_dict.keys())
new_dict.update(old_dict)
self.load_state_dict(new_dict)
n_o = new_keys - old_keys
o_n = old_keys - new_keys
#print(f"{n_o}\n{o_n}")
return n_o, o_n
class CLIPTextHead(MetaHead):
def __init__(self, cfg, **kwargs):
super().__init__(cfg, **kwargs)
self.initialize_parameters()
def initialize_parameters(self):
pass #nn.init.normal_(self.positional_embedding, std=0.01)
def copy_state_dict(self, state_dict):
pre_keys = {"token_embedding.weight"}
post_keys = {}
misc_keys = {"positional_embedding"}
old_dict = OrderedDict()
for k, v in state_dict.items():
if k in pre_keys:
k = f"pre_encoder.{k}"
elif k in post_keys:
k = f"post_encoder.{k}"
elif k in misc_keys:
k = f"misc.{k}"
else:
#k = re.sub("^ln_\w+\.", "ln.", k)
k = re.sub("^transformer\.", "encoder.", k)
k = re.sub("^ln_final\.", "post_encoder.ln.", k)
k = re.sub("^text_projection", "post_encoder.proj", k)
old_dict[k] = v
new_dict = self.state_dict()
# TODO better via interpolation
pos_key = "misc.positional_embedding"
old_num = old_dict[pos_key].shape[0]
new_num = new_dict[pos_key].shape[0]
if old_num >= new_num:
old_dict[pos_key] = old_dict.pop(pos_key)[:new_num]
else:
new_dict[pos_key][:old_num] = old_dict.pop(pos_key)
old_dict[pos_key] = new_dict[pos_key] # unnecessary
new_keys = set(new_dict.keys())
old_keys = set(old_dict.keys())
new_dict.update(old_dict)
self.load_state_dict(new_dict)
n_o = new_keys - old_keys
o_n = old_keys - new_keys
#print(f"{n_o}\n{o_n}")
return n_o, o_n
| 42.303754
| 136
| 0.572166
|
794a5753ba8737ead1f0997480e430a83ff0d385
| 1,302
|
py
|
Python
|
blog/migrations/0001_initial.py
|
rachelhs/wagtail-starter
|
2363517fd91e279d564ff899dfa3cdfd7ec01aa9
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
rachelhs/wagtail-starter
|
2363517fd91e279d564ff899dfa3cdfd7ec01aa9
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
rachelhs/wagtail-starter
|
2363517fd91e279d564ff899dfa3cdfd7ec01aa9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-08 16:16
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0059_apply_collection_ordering'),
]
operations = [
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('description', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| 32.55
| 191
| 0.582949
|
794a57879e0ee3cd81f3736c33b99a54f1584013
| 10,018
|
py
|
Python
|
allennlp/commands/predict.py
|
uysalelif/allennlp
|
9de5fb19a0f37c5ad394b4cc600e2335f00cdc74
|
[
"Apache-2.0"
] | null | null | null |
allennlp/commands/predict.py
|
uysalelif/allennlp
|
9de5fb19a0f37c5ad394b4cc600e2335f00cdc74
|
[
"Apache-2.0"
] | null | null | null |
allennlp/commands/predict.py
|
uysalelif/allennlp
|
9de5fb19a0f37c5ad394b4cc600e2335f00cdc74
|
[
"Apache-2.0"
] | null | null | null |
"""
The ``predict`` subcommand allows you to make bulk JSON-to-JSON
or dataset to JSON predictions using a trained model and its
:class:`~allennlp.predictors.predictor.Predictor` wrapper.
.. code-block:: bash
$ allennlp predict --help
usage: allennlp predict [-h] [--output-file OUTPUT_FILE]
[--weights-file WEIGHTS_FILE]
[--batch-size BATCH_SIZE] [--silent]
[--cuda-device CUDA_DEVICE] [--use-dataset-reader]
[--dataset-reader-choice {train,validation}]
[-o OVERRIDES] [--predictor PREDICTOR]
[--include-package INCLUDE_PACKAGE]
archive_file input_file
Run the specified model against a JSON-lines input file.
positional arguments:
archive_file the archived model to make predictions with
input_file path to or url of the input file
optional arguments:
-h, --help show this help message and exit
--output-file OUTPUT_FILE
path to output file
--weights-file WEIGHTS_FILE
a path that overrides which weights file to use
--batch-size BATCH_SIZE
The batch size to use for processing
--silent do not print output to stdout
--cuda-device CUDA_DEVICE
id of GPU to use (if any)
--use-dataset-reader Whether to use the dataset reader of the original
model to load Instances. The validation dataset reader
will be used if it exists, otherwise it will fall back
to the train dataset reader. This behavior can be
overridden with the --dataset-reader-choice flag.
--dataset-reader-choice {train,validation}
Indicates which model dataset reader to use if the
--use-dataset-reader flag is set. (default =
validation)
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--predictor PREDICTOR
optionally specify a specific predictor to use
--include-package INCLUDE_PACKAGE
additional packages to include
"""
from typing import List, Iterator, Optional
import argparse
import sys
import json
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor, JsonDict
from allennlp.data import Instance
class Predict(Subcommand):
def add_subparser(
self, name: str, parser: argparse._SubParsersAction
) -> argparse.ArgumentParser:
description = """Run the specified model against a JSON-lines input file."""
subparser = parser.add_parser(
name, description=description, help="Use a trained model to make predictions."
)
subparser.add_argument(
"archive_file", type=str, help="the archived model to make predictions with"
)
subparser.add_argument("input_file", type=str, help="path to or url of the input file")
subparser.add_argument("--output-file", type=str, help="path to output file")
subparser.add_argument(
"--weights-file", type=str, help="a path that overrides which weights file to use"
)
batch_size = subparser.add_mutually_exclusive_group(required=False)
batch_size.add_argument(
"--batch-size", type=int, default=1, help="The batch size to use for processing"
)
subparser.add_argument(
"--silent", action="store_true", help="do not print output to stdout"
)
cuda_device = subparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"--use-dataset-reader",
action="store_true",
help="Whether to use the dataset reader of the original model to load Instances. "
"The validation dataset reader will be used if it exists, otherwise it will "
"fall back to the train dataset reader. This behavior can be overridden "
"with the --dataset-reader-choice flag.",
)
subparser.add_argument(
"--dataset-reader-choice",
type=str,
choices=["train", "validation"],
default="validation",
help="Indicates which model dataset reader to use if the --use-dataset-reader "
"flag is set.",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
subparser.add_argument(
"--predictor", type=str, help="optionally specify a specific predictor to use"
)
subparser.set_defaults(func=_predict)
return subparser
def _get_predictor(args: argparse.Namespace) -> Predictor:
check_for_gpu(args.cuda_device)
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
return Predictor.from_archive(
archive, args.predictor, dataset_reader_to_load=args.dataset_reader_choice
)
class _PredictManager:
def __init__(
self,
predictor: Predictor,
input_file: str,
output_file: Optional[str],
batch_size: int,
print_to_console: bool,
has_dataset_reader: bool,
) -> None:
self._predictor = predictor
self._input_file = input_file
if output_file is not None:
self._output_file = open(output_file, "w", encoding="utf-8")
else:
self._output_file = None
self._batch_size = batch_size
self._print_to_console = print_to_console
if has_dataset_reader:
self._dataset_reader = predictor._dataset_reader
else:
self._dataset_reader = None
def _predict_json(self, batch_data: List[JsonDict]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_json(batch_data[0])]
else:
results = self._predictor.predict_batch_json(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _predict_instances(self, batch_data: List[Instance]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_instance(batch_data[0])]
else:
results = self._predictor.predict_batch_instance(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _maybe_print_to_console_and_file(
        self, index: int, prediction: str, model_input: Optional[str] = None
) -> None:
if self._print_to_console:
if model_input is not None:
print(f"input {index}: ", model_input)
print("prediction: ", prediction)
if self._output_file is not None:
self._output_file.write(prediction)
def _get_json_data(self) -> Iterator[JsonDict]:
if self._input_file == "-":
for line in sys.stdin:
if not line.isspace():
yield self._predictor.load_line(line)
else:
input_file = cached_path(self._input_file)
with open(input_file, "r", encoding="utf-8") as file_input:
for line in file_input:
if not line.isspace():
yield self._predictor.load_line(line)
def _get_instance_data(self) -> Iterator[Instance]:
if self._input_file == "-":
raise ConfigurationError("stdin is not an option when using a DatasetReader.")
elif self._dataset_reader is None:
raise ConfigurationError("To generate instances directly, pass a DatasetReader.")
else:
yield from self._dataset_reader.read(self._input_file)
def run(self) -> None:
has_reader = self._dataset_reader is not None
index = 0
if has_reader:
for batch in lazy_groups_of(self._get_instance_data(), self._batch_size):
for model_input_instance, result in zip(batch, self._predict_instances(batch)):
self._maybe_print_to_console_and_file(index, result, str(model_input_instance))
index = index + 1
else:
for batch_json in lazy_groups_of(self._get_json_data(), self._batch_size):
for model_input_json, result in zip(batch_json, self._predict_json(batch_json)):
self._maybe_print_to_console_and_file(
index, result, json.dumps(model_input_json, ensure_ascii=False)
)
index = index + 1
if self._output_file is not None:
self._output_file.close()
def _predict(args: argparse.Namespace) -> None:
predictor = _get_predictor(args)
if args.silent and not args.output_file:
print("--silent specified without --output-file.")
print("Exiting early because no output will be created.")
sys.exit(0)
manager = _PredictManager(
predictor,
args.input_file,
args.output_file,
args.batch_size,
not args.silent,
args.use_dataset_reader,
)
manager.run()
| 39.132813
| 99
| 0.609004
|
794a57d1822de2e2074ff932cfa874dcbba9a428
| 5,332
|
py
|
Python
|
Name_Popularity_Searching/babygraphics.py
|
tzuling/sc101-project
|
04991505d43b1f998851141bfaf8af083ee9a6c2
|
[
"MIT"
] | null | null | null |
Name_Popularity_Searching/babygraphics.py
|
tzuling/sc101-project
|
04991505d43b1f998851141bfaf8af083ee9a6c2
|
[
"MIT"
] | null | null | null |
Name_Popularity_Searching/babygraphics.py
|
tzuling/sc101-project
|
04991505d43b1f998851141bfaf8af083ee9a6c2
|
[
"MIT"
] | null | null | null |
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
Draws the historical popularity (rank) trend of baby names on a Tkinter canvas.
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue', 'black']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
x_width = width/len(YEARS)-1
x_coordinate = x_width * year_index
return x_coordinate
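# e.g. with CANVAS_WIDTH = 1000 and 12 YEARS, x_width = 1000/12 - 1 ~ 82.3,
# so year_index 0 -> x = 0 and year_index 11 -> x ~ 905.7
# (callers add GRAPH_MARGIN_SIZE before drawing).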
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
# Top line
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE,
GRAPH_MARGIN_SIZE)
# Bottom line
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE,
CANVAS_HEIGHT-GRAPH_MARGIN_SIZE)
# Year line
for i in range(len(YEARS)):
x_coordinate = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(GRAPH_MARGIN_SIZE+x_coordinate, 0, GRAPH_MARGIN_SIZE+x_coordinate,
CANVAS_HEIGHT)
x_text = GRAPH_MARGIN_SIZE + x_coordinate + TEXT_DX
y_text = CANVAS_HEIGHT-GRAPH_MARGIN_SIZE + TEXT_DX
canvas.create_text(x_text, y_text, text=YEARS[i], anchor=tkinter.NW)
#################################
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
# Write your code below this line
"""
x1, y1: the current point
x2, y2: the next point
rank: the name of rank of the year, "*" for more than 1000
c: the parameter of COLORS
"""
c = 0
for lookup_name in lookup_names:
dic = name_data[lookup_name]
        for i in range(len(YEARS)-1):  # the last point doesn't need a line drawn from it
x1 = GRAPH_MARGIN_SIZE + get_x_coordinate(CANVAS_WIDTH, i)
if str(YEARS[i]) in dic:
rank = dic[str(YEARS[i])]
                y1 = GRAPH_MARGIN_SIZE + int(rank) * (CANVAS_HEIGHT - 2 * GRAPH_MARGIN_SIZE) / MAX_RANK  # scale rank to the plot area between the fixed lines
else:
y1 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
rank = "*"
canvas.create_text(x1 + TEXT_DX, y1, text=f'{lookup_name} {rank}',
anchor=tkinter.SW, fill=COLORS[c])
x2 = GRAPH_MARGIN_SIZE + get_x_coordinate(CANVAS_WIDTH, i+1)
if str(YEARS[i+1]) in dic:
rank = dic[str(YEARS[i + 1])]
                y2 = GRAPH_MARGIN_SIZE + int(rank) * (CANVAS_HEIGHT - 2 * GRAPH_MARGIN_SIZE) / MAX_RANK
else:
y2 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
rank = "*"
canvas.create_line(x1, y1, x2, y2, width=LINE_WIDTH, fill=COLORS[c])
canvas.create_text(x2 + TEXT_DX, y2, text=f'{lookup_name} {rank}',
anchor=tkinter.SW, fill=COLORS[c])
c += 1
if c == len(COLORS):
c = 0
#################################
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
| 33.325
| 106
| 0.632033
|
794a57f28fa24620d12066693f9e984ec6691d77
| 9,714
|
py
|
Python
|
emu8/tui8.py
|
dreary-dugong/emu8
|
ae852c1126dd9d332d677ad050b50c0ef4b67dee
|
[
"MIT"
] | null | null | null |
emu8/tui8.py
|
dreary-dugong/emu8
|
ae852c1126dd9d332d677ad050b50c0ef4b67dee
|
[
"MIT"
] | null | null | null |
emu8/tui8.py
|
dreary-dugong/emu8
|
ae852c1126dd9d332d677ad050b50c0ef4b67dee
|
[
"MIT"
] | 1
|
2022-02-22T20:51:13.000Z
|
2022-02-22T20:51:13.000Z
|
import curses
import debug8
class Tui:
"""represent the terminal user interface for a chip8 Chip object"""
def __init__(self, stdscr, chip, compmode):
"""inintialize instance data and set curses settings"""
self.stdscr = stdscr
self.chip = chip
self.compmode = compmode # are we running in fast mode or comprehensive mode
        curses.initscr()  # initialize screen
curses.noecho() # don't write pressed characters to the screen
curses.curs_set(0) # set cursor to invisible
if self.compmode:
self.init_windows_comp()
else:
self.init_windows_fast()
def init_windows_fast(self):
"""initialize the minmal number of windows"""
self.init_chip_win()
self.init_key_win()
self.init_input_win()
def init_windows_comp(self):
"""initialize all windows"""
self.init_chip_win()
self.init_reg_win()
self.init_mem_win()
self.init_key_win()
self.init_desc_win()
self.init_input_win()
def init_chip_win(self):
"""create window to display chip-8 screen contents"""
self.chipWin = curses.newwin(33, 129, 0, 0)
# set colors
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_GREEN)
self.chipWinColors = curses.color_pair(1)
for i in range(32):
self.chipWin.addstr(i, 0, " " * 129)
self.chipWin.refresh()
def init_reg_win(self):
"""create window to display register contents and insert labels"""
self.regWin = curses.newwin(6, 41, 33, 0)
# registers 0 - F
for row in range(4):
for col in range(4):
reg = 4 * row + col
regstr = f"v{hex(reg)[2]}:{Tui.double_hex(0)} "
self.regWin.addstr(row, col * len(regstr), regstr)
# special purpose registers
istr = f"rI:{Tui.triple_hex(0)}"
dtstr = f"rDT:{Tui.double_hex(0)}"
ststr = f"rST:{Tui.double_hex(0)}"
self.regWin.addstr(5, 0, istr)
self.regWin.addstr(" " + dtstr)
self.regWin.addstr(" " + ststr)
self.regWin.refresh()
def init_mem_win(self):
"""create window to display chip memory contents"""
self.memWin = curses.newwin(45, 27, 0, 130)
memlimit = 20
# each row in the 3 columns
for y in range(2 * memlimit + 1):
# memory address column
self.memWin.addstr(y, 0, Tui.triple_hex(0))
# memory value column
self.memWin.addstr(y, 6, Tui.double_hex(0))
# assembly instruction column
if y % 2 == 0:
self.memWin.addstr(y, 12, debug8.inst_to_asm(0))
else:
self.memWin.addstr(y, 12, " ")
curses.init_pair(5, curses.COLOR_BLACK, curses.COLOR_WHITE)
# self.memWin.addstr(3, 6 * memlimit, "^", curses.color_pair(5))
self.memWin.refresh()
def init_key_win(self):
"""create window to display keys pressed on the chip"""
self.keyWin = curses.newwin(5, 15, 33, 42)
offset = 5
# set key coordinates in the window
self.keyCoords = dict()
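        # This mirrors the standard CHIP-8 hex keypad layout:
        #   1 2 3 C
        #   4 5 6 D
        #   7 8 9 E
        #   A 0 B F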
# 1-9
for row in range(3):
for col in range(3):
self.keyCoords[row * 3 + col + 1] = (row, col * 2 + offset)
# C-F
for row in range(4):
self.keyCoords[12 + row] = (row, 2 * 3 + offset)
# everything else
self.keyCoords[10] = (3, 0 + offset) # A
self.keyCoords[0] = (3, 2 + offset) # 0
self.keyCoords[11] = (3, 4 + offset) # B
# put keys on the window
for key in range(16):
y, x = self.keyCoords[key]
self.keyWin.addstr(y, x, hex(key)[2])
# set highlight color for update method
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.keyHighlightColor = curses.color_pair(2)
self.keyWin.refresh()
def init_desc_win(self):
"""create a window to describe currently executing instructions"""
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.descHighlightColor = curses.color_pair(3)
self.descWin = curses.newwin(3, 100, 41, 56)
self.descWin.addstr(0, 0, "invalid instruction")
self.descWin.addstr(1, 0, "invalid instruction", self.descHighlightColor)
self.descWin.addstr(2, 0, "invalid instruction", curses.color_pair(0))
self.descWin.refresh()
def init_input_win(self):
"""initialize a blank window to accept user input"""
self.inputWin = curses.newwin(1, 1, 39, 0)
self.inputWin.addstr(0, 0, "") # add blank sting to set cursor
def update(self):
"""alternative method to update all windows"""
if self.compmode:
self.update_windows_comp()
else:
self.update_windows_fast()
def update_windows_fast(self):
"""update the minimal number of windows (fast mode)"""
self.update_chip_win()
self.update_key_win()
self.update_input_win()
def update_windows_comp(self):
"""update all windows (comprehensive mode)"""
self.update_chip_win()
self.update_reg_win()
self.update_mem_win()
self.update_key_win()
self.update_desc_win()
self.update_input_win()
def update_chip_win(self):
"""update the chip display window to match the chip"""
# note that the display on the chip is sideways
disp = self.chip.disp
for x, column in enumerate(disp):
for y, val in enumerate(column):
if val:
self.chipWin.addstr(y, x * 2, " ", self.chipWinColors)
else:
self.chipWin.addstr(y, x * 2, " ", curses.color_pair(0))
self.chipWin.refresh()
def update_reg_win(self):
"""update register window to match contents of chip registers"""
# registers 0-15
for row in range(4):
for col in range(4):
reg = 4 * row + col
valstr = Tui.double_hex(self.chip.regs[reg])
self.regWin.addstr(row, 10 * col + 3, valstr)
# special purpose registers
# I
valstr = Tui.triple_hex(self.chip.regI)
self.regWin.addstr(5, 3, valstr)
# DT
valstr = Tui.double_hex(self.chip.dt)
self.regWin.addstr(5, 15, valstr)
# ST
valstr = Tui.double_hex(self.chip.st)
self.regWin.addstr(5, 26, valstr)
self.regWin.refresh()
def update_mem_win(self):
"""update memory window to match contents of chip memory"""
memlimit = 20 # this should be instance data probably
pc = self.chip.pc
mem = self.chip.mem
self.memWin.erase()
y = 0
for addr in range(pc - memlimit, pc + memlimit + 1):
if addr == pc:
color = 5
else:
color = 0
# memory address column
self.memWin.addstr(y, 0, Tui.triple_hex(addr), curses.color_pair(color))
# memory value column
self.memWin.addstr(
y, 6, Tui.double_hex(mem[addr]), curses.color_pair(color)
)
# assembly instruction column
if (addr - (pc % 2)) % 2 == 0:
inst = (mem[addr] << 8) + mem[addr + 1]
self.memWin.addstr(
y, 12, debug8.inst_to_asm(inst), curses.color_pair(color)
)
else:
self.memWin.addstr(y, 12, " ", curses.color_pair(color))
y += 1
self.memWin.refresh()
def update_key_win(self):
"""update key window to match contents of keys on chip"""
for key, value in enumerate(self.chip.keys):
y, x = self.keyCoords[key]
if value:
self.keyWin.addstr(y, x, hex(key)[2], self.keyHighlightColor)
else:
self.keyWin.addstr(y, x, hex(key)[2], curses.color_pair(0))
self.keyWin.refresh()
def update_desc_win(self):
"""update description window with previous, current, and next instruction descriptions"""
# note that we base this off mem so previous and current may not be accurate since we don't
# account for jumps
pc = self.chip.pc
mem = self.chip.mem
self.descWin.erase()
prevInst = (mem[pc - 2] << 8) + mem[pc - 1]
currInst = (mem[pc] << 8) + mem[pc + 1]
nextInst = (mem[pc + 2] << 8) + mem[pc + 3]
prevDesc = debug8.inst_to_asmdesc(prevInst)
currDesc = debug8.inst_to_asmdesc(currInst)
nextDesc = debug8.inst_to_asmdesc(nextInst)
self.descWin.addstr(0, 0, prevDesc)
self.descWin.addstr(1, 0, currDesc, self.descHighlightColor)
self.descWin.addstr(2, 0, nextDesc, curses.color_pair(0))
self.descWin.refresh()
def update_input_win(self):
"""update input window to set cursor to receive input"""
self.inputWin.addstr(0, 0, "")
@staticmethod
def double_hex(n):
"""return a two digit hex representation of an integer"""
h = hex(n)
if len(h) == 3:
h = h[:2] + "0" + h[2]
return h
@staticmethod
def triple_hex(n):
"""return a three digit hex representation of an integer"""
h = Tui.double_hex(n)
if len(h) == 4:
h = h[:2] + "0" + h[2:]
return h
def main(stdscr):
pass
if __name__ == "__main__":
curses.wrapper(main)
| 31.84918
| 99
| 0.56434
|
794a5a02d705e8b9eb4b065a24249b2634cf296a
| 2,477
|
py
|
Python
|
pyFileFixity/lib/gooey/python_bindings/gooey_parser.py
|
lrq3000/rfigc
|
a68021a506fee1aabea6b2fb88e685de347d900f
|
[
"MIT"
] | 82
|
2015-03-20T18:43:37.000Z
|
2022-03-05T13:23:12.000Z
|
pyFileFixity/lib/gooey/python_bindings/gooey_parser.py
|
lrq3000/rfigc
|
a68021a506fee1aabea6b2fb88e685de347d900f
|
[
"MIT"
] | 9
|
2015-12-05T17:32:14.000Z
|
2021-06-11T15:51:38.000Z
|
pyFileFixity/lib/gooey/python_bindings/gooey_parser.py
|
hadi-f90/pyFileFixity
|
2cb3dd6225a6b062a98fa2d61c4a0a29d8010428
|
[
"MIT"
] | 10
|
2015-12-13T18:51:44.000Z
|
2022-02-21T10:50:28.000Z
|
from argparse import ArgumentParser, _SubParsersAction
from gettext import gettext as _  # add_subparsers below relies on argparse's gettext alias
class GooeySubParser(_SubParsersAction):
def __init__(self, *args, **kwargs):
super(GooeySubParser, self).__init__(*args, **kwargs)
class GooeyParser(object):
def __init__(self, **kwargs):
self.__dict__['parser'] = ArgumentParser(**kwargs)
self.widgets = {}
@property
def _mutually_exclusive_groups(self):
return self.parser._mutually_exclusive_groups
@property
def _actions(self):
return self.parser._actions
@property
def description(self):
return self.parser.description
def add_argument(self, *args, **kwargs):
widget = kwargs.pop('widget', None)
self.parser.add_argument(*args, **kwargs)
self.widgets[self.parser._actions[-1].dest] = widget
def add_mutually_exclusive_group(self, **kwargs):
return self.parser.add_mutually_exclusive_group(**kwargs)
def add_argument_group(self, *args, **kwargs):
return self.parser.add_argument_group(*args, **kwargs)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace)
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def __getattr__(self, item):
return getattr(self.parser, item)
def __setattr__(self, key, value):
return setattr(self.parser, key, value)
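# Usage sketch (widget names follow Gooey's conventions, e.g. 'FileChooser'):
#   parser = GooeyParser(description='My tool')
#   parser.add_argument('--input', widget='FileChooser')
#   parser.widgets  # -> {'input': 'FileChooser'}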
| 32.592105
| 68
| 0.715785
|
794a5a3decf59c3a9824d940b5a16b079c9c3eea
| 3,201
|
py
|
Python
|
app/hid/write.py
|
tank0226/tinypilot
|
624d39e7c186418f80b6b1f4e61ee7f25a79cd3c
|
[
"MIT"
] | 1,334
|
2020-07-14T01:53:02.000Z
|
2021-06-08T09:48:28.000Z
|
app/hid/write.py
|
tank0226/tinypilot
|
624d39e7c186418f80b6b1f4e61ee7f25a79cd3c
|
[
"MIT"
] | 320
|
2020-07-07T20:18:05.000Z
|
2021-06-07T21:18:42.000Z
|
app/hid/write.py
|
tank0226/tinypilot
|
624d39e7c186418f80b6b1f4e61ee7f25a79cd3c
|
[
"MIT"
] | 124
|
2020-07-23T16:39:06.000Z
|
2021-06-04T10:22:53.000Z
|
import dataclasses
import logging
import multiprocessing
import typing
logger = logging.getLogger(__name__)
class Error(Exception):
pass
class WriteError(Error):
pass
@dataclasses.dataclass
class ProcessResult:
return_value: typing.Any = None
exception: Exception = None
def was_successful(self) -> bool:
return self.exception is None
class ProcessWithResult(multiprocessing.Process):
"""A multiprocessing.Process object that keeps track of the child process'
result (i.e., the return value and exception raised).
Inspired by:
https://stackoverflow.com/a/33599967/3769045
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Create the Connection objects used for communication between the
# parent and child processes.
self.parent_conn, self.child_conn = multiprocessing.Pipe()
def run(self):
"""Method to be run in sub-process."""
result = ProcessResult()
try:
if self._target:
result.return_value = self._target(*self._args, **self._kwargs)
except Exception as e:
result.exception = e
raise
finally:
self.child_conn.send(result)
def result(self):
"""Get the result from the child process.
Returns:
If the child process has completed, a ProcessResult object.
Otherwise, a None object.
"""
return self.parent_conn.recv() if self.parent_conn.poll() else None
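# Minimal usage sketch for ProcessWithResult (illustrative only):
#
#   def double(x):
#       return x * 2
#
#   proc = ProcessWithResult(target=double, args=(21,), daemon=True)
#   proc.start()
#   proc.join(timeout=1.0)
#   outcome = proc.result()  # ProcessResult(return_value=42) once the child has reported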
def _write_to_hid_interface_immediately(hid_path, buffer):
try:
with open(hid_path, 'ab+') as hid_handle:
hid_handle.write(bytearray(buffer))
except BlockingIOError:
logger.error(
'Failed to write to HID interface: %s. Is USB cable connected?',
hid_path)
def write_to_hid_interface(hid_path, buffer):
# Avoid an unnecessary string formatting call in a write that requires low
# latency.
if logger.getEffectiveLevel() == logging.DEBUG:
logger.debug_sensitive('writing to HID interface %s: %s', hid_path,
' '.join(['0x%02x' % x for x in buffer]))
# Writes can hang, for example, when TinyPilot is attempting to write to the
# mouse interface, but the target system has no GUI. To avoid locking up the
# main server process, perform the HID interface I/O in a separate process.
write_process = ProcessWithResult(
target=_write_to_hid_interface_immediately,
args=(hid_path, buffer),
daemon=True)
write_process.start()
write_process.join(timeout=0.5)
if write_process.is_alive():
write_process.kill()
_wait_for_process_exit(write_process)
result = write_process.result()
# If the result is None, it means the write failed to complete in time.
if result is None or not result.was_successful():
raise WriteError(
'Failed to write to HID interface: %s. Is USB cable connected?' %
hid_path)
def _wait_for_process_exit(target_process):
max_attempts = 3
for _ in range(max_attempts):
target_process.join(timeout=0.1)
| 31.382353
| 80
| 0.659169
|
794a5b0293c875e8ef1b7105100a8e23c1399bd4
| 1,975
|
py
|
Python
|
python/nagcat/merlintest.py
|
marineam/nagcat
|
445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf
|
[
"Apache-2.0"
] | null | null | null |
python/nagcat/merlintest.py
|
marineam/nagcat
|
445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf
|
[
"Apache-2.0"
] | null | null | null |
python/nagcat/merlintest.py
|
marineam/nagcat
|
445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2011 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from nagcat import log, test, scheduler, simple
class NagcatMerlinTestDummy(scheduler.Scheduler):
"""For testing purposes."""
def build_tests(self, config):
return []
def nagios_status(self):
return simple.ObjectDummy()
def get_peer_id_num_peers(self):
return 0,2
class MerlinTest(test.Test):
def __init__(self, nagcat, conf, test_index):
test.Test.__init__(self, nagcat, conf)
self._test_index = test_index
def _should_run(self):
"""Decides whether or not a test should be run, based on its task
index and the schedulers peer_id. Returns True if it should run, False
if it should not."""
peer_id, num_peers = self._nagcat.get_peer_id_num_peers()
log.debug("Running should_run, test_index=%s, num_peers=%s, peer_id=%s",
str(self._test_index), num_peers, peer_id)
if peer_id and num_peers:
if self._test_index % num_peers != peer_id:
return False
return True
def start(self):
"""Decides whether or not to start the test, based on _should_run."""
if self._should_run():
log.debug("Running test %s", self)
            return super(MerlinTest, self).start()
else:
log.debug("Skipping start of %s", self)
return defer.succeed(None)
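# An illustration of the intended modulo sharding behind _should_run: each
# peer picks the test indices congruent to its peer_id. The peer count and
# index range below are made up.
def _example_sharding(num_peers=2, num_tests=6):
    shards = {}
    for peer_id in range(num_peers):
        shards[peer_id] = [i for i in range(num_tests)
                           if i % num_peers == peer_id]
    return shards  # {0: [0, 2, 4], 1: [1, 3, 5]}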
| 35.267857
| 80
| 0.673418
|
794a5b25241e6626eba0022fc469f72e616a17bc
| 704
|
py
|
Python
|
vrmjobs/probe_init.py
|
thanhledev/vrmjobs
|
3f9e19238516a3536e98c1fd1ce2c3ad8dbc1aa1
|
[
"MIT"
] | null | null | null |
vrmjobs/probe_init.py
|
thanhledev/vrmjobs
|
3f9e19238516a3536e98c1fd1ce2c3ad8dbc1aa1
|
[
"MIT"
] | null | null | null |
vrmjobs/probe_init.py
|
thanhledev/vrmjobs
|
3f9e19238516a3536e98c1fd1ce2c3ad8dbc1aa1
|
[
"MIT"
] | null | null | null |
from .host_info import HostInfo
from .vrm_type import VrmType
class ProbeInit(object):
"""
System job that will be encapsulated inside an UDP packet
and broadcast to all worker hosts inside a single network segment
by a collector host
"""
def __init__(self, packet_id: str, info: 'HostInfo', packet_type: 'VrmType'):
self.id = packet_id
self.info = info
self.type = packet_type
def __str__(self):
return "[{}] {} - {}".format(str(self.type), id, str(self.info))
def __repr__(self):
return "{}({} - {})".format(self.__class__.__name__,
id,
str(self.info))
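# A hedged usage sketch: HostInfo and VrmType are defined in sibling modules
# not shown here, so the constructor arguments below are assumptions.
# packet = ProbeInit('pkt-001', HostInfo(...), VrmType.PROBE_INIT)
# print(packet)   # -> "[<type>] <id> - <host info>"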
| 29.333333
| 81
| 0.575284
|
794a5fe45515ce9238d96e7dd216d4e499dfb4c2
| 9,796
|
py
|
Python
|
pyrate/core/ref_phs_est.py
|
adu461386118/PyRate
|
0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a
|
[
"Apache-2.0"
] | 1
|
2021-03-22T17:25:55.000Z
|
2021-03-22T17:25:55.000Z
|
pyrate/core/ref_phs_est.py
|
adu461386118/PyRate
|
0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a
|
[
"Apache-2.0"
] | null | null | null |
pyrate/core/ref_phs_est.py
|
adu461386118/PyRate
|
0428dba9e2b3d4b6807f8c62d55c161c0dd4d75a
|
[
"Apache-2.0"
] | null | null | null |
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
This Python module implements a reference phase estimation algorithm.
"""
from pathlib import Path
from typing import List
from joblib import Parallel, delayed
import numpy as np
from pyrate.core import ifgconstants as ifc, config as cf, mpiops, shared
from pyrate.core.shared import joblib_log_level, nanmedian, Ifg
from pyrate.configuration import Configuration
from pyrate.core.logger import pyratelogger as log
MAIN_PROCESS = 0
def est_ref_phase_patch_median(ifg_paths, params, refpx, refpy):
"""
Reference phase estimation, calculated as the median within a patch around
the supplied reference pixel.
:param list ifg_paths: List of interferogram paths or objects.
:param dict params: Dictionary of configuration parameters
:param int refpx: Reference pixel X found by ref pixel method
:param int refpy: Reference pixel Y found by ref pixel method
:return: ref_phs: Numpy array of reference phase values of size (nifgs, 1)
:rtype: ndarray
    :return: ifgs: Reference phase data is removed from the interferograms in place
"""
half_chip_size = int(np.floor(params[cf.REF_CHIP_SIZE] / 2.0))
chipsize = 2 * half_chip_size + 1
thresh = chipsize * chipsize * params[cf.REF_MIN_FRAC]
def _inner(ifg_paths):
if isinstance(ifg_paths[0], Ifg):
ifgs = ifg_paths
else:
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
for ifg in ifgs:
if not ifg.is_open:
ifg.open(readonly=False)
phase_data = [i.phase_data for i in ifgs]
if params[cf.PARALLEL]:
ref_phs = Parallel(n_jobs=params[cf.PROCESSES],
verbose=joblib_log_level(cf.LOG_LEVEL))(
delayed(_est_ref_phs_patch_median)(p, half_chip_size, refpx, refpy, thresh)
for p in phase_data)
for n, ifg in enumerate(ifgs):
ifg.phase_data -= ref_phs[n]
else:
ref_phs = np.zeros(len(ifgs))
for n, ifg in enumerate(ifgs):
ref_phs[n] = _est_ref_phs_patch_median(phase_data[n], half_chip_size, refpx, refpy, thresh)
return ref_phs
process_ifgs_paths = mpiops.array_split(ifg_paths)
ref_phs = _inner(process_ifgs_paths)
return ref_phs
def _est_ref_phs_patch_median(phase_data, half_chip_size, refpx, refpy, thresh):
"""
Convenience function for ref phs estimate method 2 parallelisation
"""
patch = phase_data[refpy - half_chip_size: refpy + half_chip_size + 1,
refpx - half_chip_size: refpx + half_chip_size + 1]
patch = np.reshape(patch, newshape=(-1, 1), order='F')
    # count of valid (non-NaN) observations in the patch
    n_valid = np.sum(~np.isnan(patch))
    if n_valid < thresh:
        raise ReferencePhaseError('The data window at the reference pixel '
                                  'does not have enough valid observations. '
                                  'Actual = {}, Threshold = {}.'.format(
                                      n_valid, thresh))
ref_ph = nanmedian(patch)
return ref_ph
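# A self-contained worked example (illustration only, made-up values): the
# patch median above reduces to slicing a square window around the reference
# pixel and taking its NaN-aware median.
def _example_patch_median():
    phase = np.arange(25, dtype=float).reshape(5, 5)
    phase[0, 0] = np.nan
    refpx, refpy, half = 2, 2, 1
    patch = phase[refpy - half: refpy + half + 1,
                  refpx - half: refpx + half + 1]
    return np.nanmedian(patch)  # 12.0 for this toy array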
def est_ref_phase_ifg_median(ifg_paths, params):
"""
Reference phase estimation, calculated as the median of the whole
interferogram image.
:param list ifg_paths: List of interferogram paths or objects
:param dict params: Dictionary of configuration parameters
:return: ref_phs: Numpy array of reference phase values of size (nifgs, 1)
:rtype: ndarray
    :return: ifgs: Reference phase data is removed from the interferograms in place
"""
def _process_phase_sum(ifg_paths):
if isinstance(ifg_paths[0], Ifg):
proc_ifgs = ifg_paths
else:
proc_ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
for ifg in proc_ifgs:
if not ifg.is_open:
ifg.open(readonly=False)
ifg_phase_data_sum = np.zeros(proc_ifgs[0].shape, dtype=np.float32)
for ifg in proc_ifgs:
ifg_phase_data_sum += ifg.phase_data
return ifg_phase_data_sum
def _inner(proc_ifgs, phase_data_sum):
if isinstance(proc_ifgs[0], Ifg):
proc_ifgs = proc_ifgs
else:
proc_ifgs = [Ifg(ifg_path) for ifg_path in proc_ifgs]
for ifg in proc_ifgs:
if not ifg.is_open:
ifg.open(readonly=False)
comp = np.isnan(phase_data_sum)
comp = np.ravel(comp, order='F')
if params[cf.PARALLEL]:
log.info("Calculating ref phase using multiprocessing")
ref_phs = Parallel(n_jobs=params[cf.PROCESSES], verbose=joblib_log_level(cf.LOG_LEVEL))(
delayed(_est_ref_phs_ifg_median)(p.phase_data, comp) for p in proc_ifgs
)
for n, ifg in enumerate(proc_ifgs):
ifg.phase_data -= ref_phs[n]
else:
log.info("Calculating ref phase")
ref_phs = np.zeros(len(proc_ifgs))
for n, ifg in enumerate(proc_ifgs):
ref_phs[n] = _est_ref_phs_ifg_median(ifg.phase_data, comp)
return ref_phs
process_ifg_paths = mpiops.array_split(ifg_paths)
ifg_phase_data_sum = mpiops.comm.allreduce(_process_phase_sum(process_ifg_paths), mpiops.sum0_op)
ref_phs = _inner(process_ifg_paths, ifg_phase_data_sum)
return ref_phs
def _update_phase_metadata(ifg):
ifg.meta_data[ifc.PYRATE_REF_PHASE] = ifc.REF_PHASE_REMOVED
ifg.write_modified_phase()
log.debug(f"Reference phase corrected for {ifg.data_path}")
def _est_ref_phs_ifg_median(phase_data, comp):
"""
Convenience function for ref phs estimate method 1 parallelisation
"""
ifgv = np.ravel(phase_data, order='F')
ifgv[comp == 1] = np.nan
return nanmedian(ifgv)
def _update_phase_and_metadata(ifgs, ref_phs):
def __inner(ifg, ref_ph):
ifg.open()
ifg.phase_data -= ref_ph
ifg.meta_data[ifc.PYRATE_REF_PHASE] = ifc.REF_PHASE_REMOVED
ifg.write_modified_phase()
log.debug(f"Reference phase corrected for {ifg.data_path}")
ifg.close()
for i, rp in zip(mpiops.array_split(ifgs), mpiops.array_split(ref_phs)):
__inner(i, rp)
class ReferencePhaseError(Exception):
"""
Generic class for errors in reference phase estimation.
"""
pass
def ref_phase_est_wrapper(params):
"""
Wrapper for reference phase estimation.
"""
ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[cf.INTERFEROGRAM_FILES]]
refpx, refpy = params[cf.REFX_FOUND], params[cf.REFY_FOUND]
if len(ifg_paths) < 2:
raise ReferencePhaseError(
"At least two interferograms required for reference phase correction ({len_ifg_paths} "
"provided).".format(len_ifg_paths=len(ifg_paths))
)
# this is not going to be true as we now start with fresh multilooked ifg copies - remove?
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_REF_PHASE):
log.debug('Finished reference phase correction')
return
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
# Save reference phase numpy arrays to disk.
ref_phs_file = Configuration.ref_phs_file(params)
if ref_phs_file.exists():
ref_phs = np.load(ref_phs_file)
_update_phase_and_metadata(ifgs, ref_phs)
shared.save_numpy_phase(ifg_paths, params)
return ref_phs, ifgs
if params[cf.REF_EST_METHOD] == 1:
log.info("Calculating reference phase as median of interferogram")
ref_phs = est_ref_phase_ifg_median(ifg_paths, params)
elif params[cf.REF_EST_METHOD] == 2:
log.info('Calculating reference phase in a patch surrounding pixel (x, y): ({}, {})'.format(refpx, refpy))
ref_phs = est_ref_phase_patch_median(ifg_paths, params, refpx, refpy)
else:
raise ReferencePhaseError("No such option, set parameter 'refest' to '1' or '2'.")
if mpiops.rank == MAIN_PROCESS:
collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
process_indices = mpiops.array_split(range(len(ifg_paths)))
collected_ref_phs[process_indices] = ref_phs
for r in range(1, mpiops.size):
process_indices = mpiops.array_split(range(len(ifg_paths)), r)
this_process_ref_phs = np.zeros(shape=len(process_indices),
dtype=np.float64)
mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
collected_ref_phs[process_indices] = this_process_ref_phs
np.save(file=ref_phs_file, arr=collected_ref_phs)
else:
collected_ref_phs = np.empty(len(ifg_paths), dtype=np.float64)
mpiops.comm.Send(ref_phs, dest=MAIN_PROCESS, tag=mpiops.rank)
mpiops.comm.Bcast(collected_ref_phs, root=0)
_update_phase_and_metadata(ifgs, collected_ref_phs)
log.debug('Finished reference phase correction')
mpiops.comm.barrier()
shared.save_numpy_phase(ifg_paths, params)
log.debug("Reference phase computed!")
# Preserve old return value so tests don't break.
return ref_phs, ifgs
| 37.247148
| 114
| 0.669763
|
794a609afe6ea97a40256c190920a59c1c654ffd
| 2,733
|
py
|
Python
|
get_model.py
|
Tan90degrees/Blink-detection
|
d0cfda76730a7fabc5aadd2c39bb2785739d076d
|
[
"MIT"
] | null | null | null |
get_model.py
|
Tan90degrees/Blink-detection
|
d0cfda76730a7fabc5aadd2c39bb2785739d076d
|
[
"MIT"
] | null | null | null |
get_model.py
|
Tan90degrees/Blink-detection
|
d0cfda76730a7fabc5aadd2c39bb2785739d076d
|
[
"MIT"
] | null | null | null |
import requests
import os
import bz2
import getpath
def download_landmarks_model():
models = getpath.get_2rd_path("models")
if os.path.isdir(models):
pass
else:
os.mkdir(models)
model = getpath.get_3rd_path("models", "shape_predictor_68_face_landmarks.dat.bz2")
dat = getpath.get_3rd_path("models", "shape_predictor_68_face_landmarks.dat")
if os.path.exists(dat):
if os.path.isfile(dat):
return
if os.path.exists(model):
if os.path.isfile(model):
print("model already exists!")
else:
print("Downloading model!")
url = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
r = requests.get(url)
with open(model, "wb") as f:
f.write(r.content)
print("Download successful!")
else:
print("Downloading model!")
url = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
r = requests.get(url)
with open(model, "wb") as f:
f.write(r.content)
print("Download successful!")
print("Unzipping!")
with bz2.open(model, "rb") as r, open(dat, "wb") as w:
w.write(r.read())
print("Unzipped!")
# http://dlib.net/files/mmod_human_face_detector.dat.bz2
def download_face_model():
models = getpath.get_2rd_path("models")
if os.path.isdir(models):
pass
else:
os.mkdir(models)
model = getpath.get_3rd_path("models", "mmod_human_face_detector.dat.bz2")
dat = getpath.get_3rd_path("models", "mmod_human_face_detector.dat")
if os.path.exists(dat):
if os.path.isfile(dat):
return
if os.path.exists(model):
if os.path.isfile(model):
print("model already exists!")
else:
print("Downloading model!")
url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
r = requests.get(url)
with open(model, "wb") as f:
f.write(r.content)
print("Download successful!")
else:
print("Downloading model!")
url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
r = requests.get(url)
with open(model, "wb") as f:
f.write(r.content)
print("Download successful!")
print("Unzipping!")
with bz2.open(model, "rb") as r, open(dat, "wb") as w:
w.write(r.read())
print("Unzipped!")
| 33.329268
| 88
| 0.554336
|
794a60e15438fd30814a70a95cee849927abb198
| 2,803
|
py
|
Python
|
bench/test_attrs_nested.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 364
|
2016-09-10T16:09:23.000Z
|
2021-10-20T03:26:06.000Z
|
bench/test_attrs_nested.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 167
|
2016-09-22T08:45:12.000Z
|
2021-10-21T13:34:35.000Z
|
bench/test_attrs_nested.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 65
|
2016-12-31T11:21:59.000Z
|
2021-09-29T10:07:38.000Z
|
"""Benchmark attrs containing other attrs classes."""
import attr
import pytest
from cattr import Converter, GenConverter, UnstructureStrategy
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
"unstructure_strat",
[UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_unstructure_attrs_nested(benchmark, converter_cls, unstructure_strat):
c = converter_cls(unstruct_strat=unstructure_strat)
@attr.define
class InnerA:
a: int
b: float
c: str
d: bytes
@attr.define
class InnerB:
a: int
b: float
c: str
d: bytes
@attr.define
class InnerC:
a: int
b: float
c: str
d: bytes
@attr.define
class InnerD:
a: int
b: float
c: str
d: bytes
@attr.define
class InnerE:
a: int
b: float
c: str
d: bytes
@attr.define
class Outer:
a: InnerA
b: InnerB
c: InnerC
d: InnerD
e: InnerE
inst = Outer(
InnerA(1, 1.0, "one", "one".encode()),
InnerB(2, 2.0, "two", "two".encode()),
InnerC(3, 3.0, "three", "three".encode()),
InnerD(4, 4.0, "four", "four".encode()),
InnerE(5, 5.0, "five", "five".encode()),
)
benchmark(c.unstructure, inst)
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
"unstructure_strat",
[UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_unstruct_attrs_deep_nest(benchmark, converter_cls, unstructure_strat):
c = converter_cls(unstruct_strat=unstructure_strat)
@attr.define
class InnerA:
a: int
b: float
c: str
d: bytes
@attr.define
class InnerB:
a: InnerA
b: InnerA
c: InnerA
d: InnerA
@attr.define
class InnerC:
a: InnerB
b: InnerB
c: InnerB
d: InnerB
@attr.define
class InnerD:
a: InnerC
b: InnerC
c: InnerC
d: InnerC
@attr.define
class InnerE:
a: InnerD
b: InnerD
c: InnerD
d: InnerD
@attr.define
class Outer:
a: InnerE
b: InnerE
c: InnerE
d: InnerE
make_inner_a = lambda: InnerA(1, 1.0, "one", "one".encode())
make_inner_b = lambda: InnerB(*[make_inner_a() for _ in range(4)])
make_inner_c = lambda: InnerC(*[make_inner_b() for _ in range(4)])
make_inner_d = lambda: InnerD(*[make_inner_c() for _ in range(4)])
make_inner_e = lambda: InnerE(*[make_inner_d() for _ in range(4)])
inst = Outer(*[make_inner_e() for _ in range(4)])
benchmark(c.unstructure, inst)
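# Hedged usage note: these tests rely on the pytest-benchmark fixture; a
# typical invocation (assumed, not taken from this repo's docs) is:
#   pytest bench/test_attrs_nested.py --benchmark-only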
| 21.728682
| 79
| 0.579022
|
794a6151292479d4a60669333a429bea3d8e3738
| 8,184
|
py
|
Python
|
cmdb/asset.py
|
touchgold/adminset
|
3568693a4ea43312a3d3f04c843723b20b50ec93
|
[
"Apache-2.0"
] | 1
|
2018-04-27T07:24:49.000Z
|
2018-04-27T07:24:49.000Z
|
cmdb/asset.py
|
touchgold/adminset
|
3568693a4ea43312a3d3f04c843723b20b50ec93
|
[
"Apache-2.0"
] | null | null | null |
cmdb/asset.py
|
touchgold/adminset
|
3568693a4ea43312a3d3f04c843723b20b50ec93
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import datetime
import sys
from accounts.permission import permission_verify
from cmdb.api import get_object, pages, str2gb
from config.views import get_dir
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import HttpResponse, render
from forms import AssetForm
from models import ASSET_STATUS, ASSET_TYPE, Host, HostGroup, Idc, Cabinet
try:
reload(sys) # Python 2
sys.setdefaultencoding('utf8')
except NameError:
pass # Python 3
@login_required()
@permission_verify()
def asset(request):
temp_name = "cmdb/cmdb-header.html"
webssh_domain = get_dir("webssh_domain")
asset_find = []
idc_info = Idc.objects.all()
host_list = Host.objects.all()
group_info = HostGroup.objects.all()
asset_types = ASSET_TYPE
asset_status = ASSET_STATUS
idc_name = request.GET.get('idc', '')
group_name = request.GET.get('group', '')
asset_type = request.GET.get('asset_type', '')
status = request.GET.get('status', '')
keyword = request.GET.get('keyword', '')
export = request.GET.get("export", '')
group_id = request.GET.get("group_id", '')
cabinet_id = request.GET.get("cabinet_id", '')
idc_id = request.GET.get("idc_id", '')
asset_id_all = request.GET.getlist("id", '')
if group_id:
group = get_object(HostGroup, id=group_id)
if group:
asset_find = Host.objects.filter(group=group)
    elif cabinet_id:
cabinet = get_object(Cabinet, id=cabinet_id)
if cabinet:
asset_find = Host.objects.filter(cabinet=cabinet)
elif idc_id:
idc = get_object(Idc, id=idc_id)
if idc:
asset_find = Host.objects.filter(idc=idc)
else:
asset_find = Host.objects.all()
if idc_name:
asset_find = asset_find.filter(idc__name__contains=idc_name)
if group_name:
get_group = HostGroup.objects.get(name=group_name)
asset_find = get_group.serverList.all()
if asset_type:
asset_find = asset_find.filter(asset_type__contains=asset_type)
if status:
asset_find = asset_find.filter(status__contains=status)
if keyword:
asset_find = asset_find.filter(
Q(hostname__contains=keyword) |
Q(ip__contains=keyword) |
Q(other_ip__contains=keyword) |
Q(os__contains=keyword) |
Q(vendor__contains=keyword) |
Q(cpu_model__contains=keyword) |
Q(cpu_num__contains=keyword) |
Q(memory__contains=keyword) |
Q(disk__contains=keyword) |
Q(sn__contains=keyword) |
Q(position__contains=keyword) |
Q(memo__contains=keyword))
if export:
response = create_asset_excel(export, asset_id_all)
return response
assets_list, p, assets, page_range, current_page, show_first, show_end, end_page = pages(asset_find, request)
    return render(request, 'cmdb/index.html', locals())
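# A hedged illustration (not part of the original view): the keyword search
# above ORs many Q objects; the same filter can be built incrementally, which
# is easier to extend. Field names are assumed to match the Host model.
def _example_keyword_q(keyword):
    q = Q()
    for f in ['hostname', 'ip', 'os', 'vendor', 'sn', 'memo']:
        q |= Q(**{f + '__contains': keyword})
    return Host.objects.filter(q)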
def create_asset_excel(export, asset_id_all):
if export == "true":
if asset_id_all:
asset_find = []
for asset_id in asset_id_all:
asset_item = get_object(Host, id=asset_id)
if asset_item:
asset_find.append(asset_item)
response = HttpResponse(content_type='text/csv')
now = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')
file_name = 'adminset_cmdb_' + now + '.csv'
response['Content-Disposition'] = "attachment; filename="+file_name
writer = csv.writer(response)
        writer.writerow([str2gb(u'Hostname'), str2gb(u'IP address'), str2gb(u'Other IP'), str2gb(u'IDC'),
                         str2gb(u'Asset number'), str2gb(u'Device type'), str2gb(u'Device status'), str2gb(u'OS'),
                         str2gb(u'Vendor'), str2gb(u'CPU model'), str2gb(u'CPU cores'), str2gb(u'Memory size'),
                         str2gb(u'Disk info'), str2gb(u'SN'), str2gb(u'Position'),
                         str2gb(u'Remarks')])
for h in asset_find:
if h.asset_type:
at_num = int(h.asset_type)
a_type = ASSET_TYPE[at_num-1][1]
else:
a_type = ""
if h.status:
at_as = int(h.status)
a_status = ASSET_STATUS[at_as-1][1]
else:
a_status = ""
writer.writerow([str2gb(h.hostname), h.ip, h.other_ip, str2gb(h.idc), str2gb(h.asset_no),
str2gb(a_type), str2gb(a_status), str2gb(h.os), str2gb(h.vendor),
str2gb(h.cpu_model), str2gb(h.cpu_num), str2gb(h.memory), str2gb(h.disk),
str2gb(h.sn), str2gb(h.position), str2gb(h.memo)])
return response
if export == "all":
host = Host.objects.all()
response = HttpResponse(content_type='text/csv')
now = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')
file_name = 'adminset_cmdb_' + now + '.csv'
response['Content-Disposition'] = "attachment; filename=" + file_name
writer = csv.writer(response)
        writer.writerow([str2gb('Hostname'), str2gb('IP address'), str2gb('Other IP'), str2gb('IDC'), str2gb('Asset number'),
                         str2gb('Device type'), str2gb('Device status'), str2gb('OS'), str2gb('Vendor'), str2gb('CPU model'),
                         str2gb('CPU cores'), str2gb('Memory size'), str2gb('Disk info'), str2gb('SN'),
                         str2gb('Position'), str2gb('Remarks')])
for h in host:
if h.asset_type:
at_num = int(h.asset_type)
a_type = ASSET_TYPE[at_num-1][1]
else:
a_type = ""
if h.status:
at_as = int(h.status)
a_status = ASSET_STATUS[at_as-1][1]
else:
a_status = ""
writer.writerow([str2gb(h.hostname), h.ip, h.other_ip, str2gb(h.idc), str2gb(h.asset_no), str2gb(a_type),
str2gb(a_status), str2gb(h.os), str2gb(h.vendor), str2gb(h.cpu_model), str2gb(h.cpu_num),
str2gb(h.memory), str2gb(h.disk), str2gb(h.sn), str2gb(h.position),
str2gb(h.memo)])
return response
@login_required()
@permission_verify()
def asset_add(request):
temp_name = "cmdb/cmdb-header.html"
if request.method == "POST":
a_form = AssetForm(request.POST)
if a_form.is_valid():
a_form.save()
tips = u"增加成功!"
display_control = ""
else:
tips = u"增加失败!"
display_control = ""
return render(request, "cmdb/asset_add.html", locals())
else:
display_control = "none"
a_form = AssetForm()
return render(request, "cmdb/asset_add.html", locals())
@login_required()
@permission_verify()
def asset_del(request):
asset_id = request.GET.get('id', '')
if asset_id:
Host.objects.filter(id=asset_id).delete()
if request.method == 'POST':
asset_batch = request.GET.get('arg', '')
asset_id_all = str(request.POST.get('asset_id_all', ''))
if asset_batch:
for asset_id in asset_id_all.split(','):
asset_item = get_object(Host, id=asset_id)
asset_item.delete()
    return HttpResponse(u'Deleted successfully')
@login_required
@permission_verify()
def asset_edit(request, ids):
status = 0
asset_types = ASSET_TYPE
obj = get_object(Host, id=ids)
if request.method == 'POST':
af = AssetForm(request.POST, instance=obj)
if af.is_valid():
af.save()
status = 1
else:
status = 2
else:
af = AssetForm(instance=obj)
return render(request, 'cmdb/asset_edit.html', locals())
@login_required
@permission_verify()
def server_detail(request, ids):
host = Host.objects.get(id=ids)
    try:
        # host.disk appears to store a Python literal string;
        # ast.literal_eval would be a safer parser than eval here.
        disk = eval(host.disk)
    except Exception as e:
        print(e)
return render(request, 'cmdb/server_detail.html', locals())
| 36.535714
| 118
| 0.580156
|
794a61aa0e8c009ce7e26eed796c397ad9c05029
| 764
|
py
|
Python
|
source/pyromocc/tests/pyromocc/test_calibration.py
|
SINTEFMedtek/libromocc
|
65a10849401cec02fc1c9ac8b1bdebbbfc4ff1c0
|
[
"BSD-2-Clause"
] | 2
|
2019-07-03T10:02:11.000Z
|
2020-04-20T09:01:42.000Z
|
source/pyromocc/tests/pyromocc/test_calibration.py
|
SINTEFMedtek/libromocc
|
65a10849401cec02fc1c9ac8b1bdebbbfc4ff1c0
|
[
"BSD-2-Clause"
] | 4
|
2019-08-05T07:55:22.000Z
|
2020-05-11T11:05:59.000Z
|
source/pyromocc/tests/pyromocc/test_calibration.py
|
SINTEFMedtek/libromocc
|
65a10849401cec02fc1c9ac8b1bdebbbfc4ff1c0
|
[
"BSD-2-Clause"
] | 1
|
2020-06-22T09:55:47.000Z
|
2020-06-22T09:55:47.000Z
|
from unittest import TestCase
import numpy as np
from pyromocc import CalibrationMethods
class TestCalibration(TestCase):
def setUp(self) -> None:
pass
def test_calibration_shah(self):
poses_a = np.random.random((100, 4, 4))
poses_b = poses_a
calib_matrices = CalibrationMethods.calibration_shah(poses_a, poses_b)
assert np.allclose(calib_matrices.pose_x, np.eye(4, 4))
assert np.allclose(calib_matrices.pose_y, np.eye(4, 4))
calib_errors = CalibrationMethods.estimate_calibration_error(calib_matrices.pose_x, calib_matrices.pose_y,
poses_a, poses_b)
print(calib_errors.translation_error, calib_errors.rotation_error)
| 36.380952
| 114
| 0.668848
|
794a61d9fe3283a77f6f470a261d1124cd8c3132
| 3,743
|
py
|
Python
|
src/Django/venv/ShopCart/shopcart/shopcart/settings/base.py
|
KarateJB/Python.Practice
|
a5f00f669dc4b815601c093ce0753a0a82b4328a
|
[
"MIT"
] | 1
|
2020-08-14T07:21:05.000Z
|
2020-08-14T07:21:05.000Z
|
src/Django/venv/ShopCart/shopcart/shopcart/settings/base.py
|
KarateJB/Python.Practice
|
a5f00f669dc4b815601c093ce0753a0a82b4328a
|
[
"MIT"
] | null | null | null |
src/Django/venv/ShopCart/shopcart/shopcart/settings/base.py
|
KarateJB/Python.Practice
|
a5f00f669dc4b815601c093ce0753a0a82b4328a
|
[
"MIT"
] | 3
|
2018-04-08T13:35:20.000Z
|
2019-09-01T04:59:03.000Z
|
"""
Django settings for shopcart project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '!&r6$4bw+yhf6_+z0bfay%t%s051e=!*0kii0+dev_5!wwea46'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# ALLOWED_HOSTS = ['localhost', '127.0.0.1','http://localhost']
# Application definition
INSTALLED_APPS = [
    'app',  # Put your app name here
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shopcart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'app/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shopcart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'sqlserver_ado',
'HOST':'LEIASKYWALKER\\SQLEXPRESS',
'NAME': 'Shopcart',
'USER':'shopcart',
'PASSWORD':'shopcart',
#'PORT':'1433',
'OPTIONS':{
'provider':'SQLOLEDB',
# 'extra_params':'DataTypeCompatibility=80;MARS Connection=True'
}
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# STATICFILES_DIRS = ("../app/static")
STATIC_URL = '/static/'
STATIC_ROOT = ''
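# Hedged usage note: with split settings like this base module, point Django
# at it explicitly (module path assumed from the package layout above):
#   export DJANGO_SETTINGS_MODULE=shopcart.settings.base
#   python manage.py runserver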
| 27.123188
| 91
| 0.67486
|
794a62688747ff21da3408067411216ed0207a91
| 20,826
|
py
|
Python
|
unittests/test_rfc4287.py
|
UKTradeInvestment/pyslet
|
70f9731df4d874379649eeacb79d8a6583b3dcaa
|
[
"BSD-3-Clause"
] | 2
|
2016-09-16T11:17:43.000Z
|
2016-10-19T11:15:53.000Z
|
unittests/test_rfc4287.py
|
UKTradeInvestment/pyslet
|
70f9731df4d874379649eeacb79d8a6583b3dcaa
|
[
"BSD-3-Clause"
] | 2
|
2018-06-29T10:53:50.000Z
|
2021-04-06T07:55:54.000Z
|
unittests/test_rfc4287.py
|
UKTradeInvestment/pyslet
|
70f9731df4d874379649eeacb79d8a6583b3dcaa
|
[
"BSD-3-Clause"
] | 2
|
2016-10-13T15:12:50.000Z
|
2021-01-13T11:58:18.000Z
|
#! /usr/bin/env python
import os
import unittest
from io import BytesIO
import pyslet.rfc4287 as atom
from pyslet import iso8601
from pyslet.py2 import dict_keys
from pyslet.xml import namespace as xmlns
def suite():
return unittest.TestSuite((
unittest.makeSuite(AtomElementTests, 'test'),
unittest.makeSuite(AtomTextTests, 'test'),
unittest.makeSuite(PersonTests, 'test'),
unittest.makeSuite(AtomDateTests, 'test'),
unittest.makeSuite(FeedTests, 'test'),
unittest.makeSuite(EntryTests, 'test'),
unittest.makeSuite(Atom4287Tests, 'test')
))
EXAMPLE_1 = b"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>"""
EXAMPLE_2 = b"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dive into mark</title>
<subtitle type="html">
A <em>lot</em> of effort went into making this effortless
</subtitle>
<updated>2005-07-31T12:29:29Z</updated>
<id>tag:example.org,2003:3</id>
<link rel="alternate" type="text/html"
hreflang="en" href="http://example.org/"/>
<link rel="self" type="application/atom+xml"
href="http://example.org/feed.atom"/>
<rights>Copyright (c) 2003, Mark Pilgrim</rights>
<generator uri="http://www.example.com/" version="1.0">
Example Toolkit
</generator>
<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>f8dy@example.com</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>"""
class Atom4287Tests(unittest.TestCase):
def test_constants(self):
self.assertTrue(atom.ATOM_NAMESPACE == "http://www.w3.org/2005/Atom",
"Wrong atom namespace: %s" % atom.ATOM_NAMESPACE)
self.assertTrue(atom.ATOM_MIMETYPE == "application/atom+xml",
"Wrong atom mime type: %s" % atom.ATOM_MIMETYPE)
class AtomElementTests(unittest.TestCase):
def test_constructor(self):
e = atom.AtomElement(None)
self.assertTrue(e.parent is None, 'empty parent on construction')
self.assertTrue(e.xmlname is None, 'element name on construction')
self.assertTrue(e.get_base() is None,
"xml:base present on construction")
self.assertTrue(e.get_lang() is None,
"xml:lang present on construction")
attrs = e.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"Attributes present on construction")
e2 = atom.AtomElement(e)
self.assertTrue(e2.parent is e, 'non-empty parent on construction')
def test_get_set(self):
e = atom.AtomElement(None)
e.set_base("http://www.example.com/")
self.assertTrue(e.get_base() == "http://www.example.com/",
"Get/Set example xml:base value")
e.set_lang("en-US")
self.assertTrue(e.get_lang() == "en-US",
"Get/Set example xml:lang value")
attrs = e.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 2,
"Two attributes expected")
self.assertTrue(attrs[(xmlns.XML_NAMESPACE, 'base')] ==
"http://www.example.com/", "Base attribute")
self.assertTrue(attrs[(xmlns.XML_NAMESPACE, 'lang')] == "en-US",
"Lang attribute")
e.set_base(None)
attrs = e.get_attributes()
self.assertTrue(e.get_base() is None, "Get/Set empty xml:base value")
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 1,
"One attribute expected")
e.set_lang(None)
attrs = e.get_attributes()
self.assertTrue(e.get_lang() is None, "Get/Set empty xml:lang value")
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"No attributes expected")
class AtomTextTests(unittest.TestCase):
"""Untested:
If the value is "text", the content of the Text construct MUST NOT
contain child elements.
If the value of "type" is "html", the content of the Text construct
MUST NOT contain child elements
If the value of "type" is "xhtml", the content of the Text construct
MUST be a single XHTML div element [XHTML]
The XHTML div element itself MUST NOT be considered part of the content."""
def test_constructor(self):
text = atom.Text(None)
self.assertTrue(text.xmlname is None, 'element name on construction')
self.assertTrue(isinstance(text, atom.AtomElement),
"Text not an AtomElement")
self.assertTrue(text.get_base() is None,
"xml:base present on construction")
self.assertTrue(text.get_lang() is None,
"xml:lang present on construction")
attrs = text.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 1,
"Attributes present on construction")
self.assertTrue(text.get_value() == '',
"Content present on construction")
def test_string_value(self):
text = atom.Text(None)
text.set_value("Some text")
self.assertTrue(text.get_value() == "Some text",
"String constructor data")
self.assertTrue(text.type == atom.TextType.text,
"Default text type not 'text' on construction")
text = atom.Text(None)
text.set_value("Some other text", atom.TextType.xhtml)
self.assertTrue(text.get_value() == 'Some other text',
"String constructor data: found %s" % text.get_value())
self.assertTrue(text.type == atom.TextType.xhtml,
"Override text type on construction")
def test_types(self):
"""Text constructs MAY have a "type" attribute. When present,
the value MUST be one of "text", "html", or "xhtml". If the
"type" attribute is not provided, Atom Processors MUST behave as
though it were present with a value of "text"."""
text = atom.Text(None)
attrs = text.get_attributes()
self.assertTrue(text.type == atom.TextType.text and
attrs[(xmlns.NO_NAMESPACE, 'type')] == "text",
"Default text type not 'text' on construction")
text.set_value('<p>Hello', atom.TextType.html)
self.assertTrue(text.type == atom.TextType.html,
"html text type failed")
text.set_value('<p>Hello</p>', atom.TextType.xhtml)
self.assertTrue(text.type == atom.TextType.xhtml,
"xhtml text type failed")
try:
text.set_value('Hello\\par ', 'rtf')
self.fail("rtf text type failed to raise error")
except ValueError:
pass
class PersonTests(unittest.TestCase):
"""Untested:
The "atom:name" element's content conveys a human-readable name for
the person. The content of atom:name is Language-Sensitive. Person
constructs MUST contain exactly one "atom:name" element.
Person constructs MAY contain an atom:uri element, but MUST
NOT contain more than one.
Person constructs MAY contain an
atom:email element, but MUST NOT contain more than one. Its content
MUST conform to the "addr-spec" production in [RFC2822]."""
def test_constructor(self):
person = atom.Person(None)
self.assertTrue(person.xmlname is None, 'element name on construction')
self.assertTrue(isinstance(person, atom.AtomElement),
"Person not an AtomElement")
self.assertTrue(person.get_base() is None,
"xml:base present on construction")
self.assertTrue(person.get_lang() is None,
"xml:lang present on construction")
attrs = person.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"Attributes present on construction")
self.assertTrue(isinstance(person.Name, atom.Name),
"Name on construction")
self.assertTrue(person.URI is None, "URI on construction")
self.assertTrue(person.Email is None, "Email on construction")
class AtomDateTests(unittest.TestCase):
"""Untested:
Note that there MUST NOT be any white space in a Date construct or
in any IRI. Some XML-emitting implementations erroneously insert
white space around values by default, and such implementations will
emit invalid Atom Documents.
In addition, an uppercase "T" character MUST be used to separate
date and time, and an uppercase "Z" character MUST be present in the
absence of a numeric time zone offset."""
def test_atom_date_constructor(self):
date = atom.Date(None)
self.assertTrue(date.xmlname is None, 'element name on construction')
self.assertTrue(isinstance(date, atom.AtomElement),
"Date not an AtomElement")
self.assertTrue(date.get_base() is None,
"xml:base present on construction")
self.assertTrue(date.get_lang() is None,
"xml:lang present on construction")
attrs = date.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"Attributes present on construction")
self.assertTrue(isinstance(date.get_value(), iso8601.TimePoint),
"Value not a TimePoint")
class FeedTests(unittest.TestCase):
def setUp(self): # noqa
self.cwd = os.getcwd()
def tearDown(self): # noqa
os.chdir(self.cwd)
def test_constructor(self):
feed = atom.Feed(None)
self.assertTrue(
isinstance(feed, atom.AtomElement), "Feed not an AtomElement")
self.assertTrue(feed.xmlname == "feed", "Feed XML name")
self.assertTrue(
feed.get_base() is None, "xml:base present on construction")
self.assertTrue(
feed.get_lang() is None, "xml:lang present on construction")
self.assertTrue(len(feed.Entry) == 0, "Non-empty feed on construction")
attrs = feed.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"Attributes present on construction")
def test_read_xml(self):
doc = atom.AtomDocument()
doc.read(src=BytesIO(EXAMPLE_1))
feed = doc.root
self.assertTrue(isinstance(feed, atom.Feed), "Example 1 not a feed")
title = feed.Title
self.assertTrue(isinstance(title, atom.Text) and
title.get_value() == "Example Feed",
"Example 1 title: " + str(title))
link = feed.Link[0]
self.assertTrue(isinstance(link, atom.Link) and link.href ==
"http://example.org/", "Example 1 link")
updated = feed.Updated
self.assertTrue(
isinstance(updated.get_value(), iso8601.TimePoint) and
updated.get_value() ==
iso8601.TimePoint.from_str("2003-12-13T18:30:02Z"),
"Example 1 updated: found %s" % updated.get_value())
author = feed.Author[0]
self.assertTrue(
isinstance(author, atom.Person) and author.Name.get_value() ==
"John Doe", "Example 1 author")
self.assertTrue(
isinstance(feed.AtomId, atom.AtomId) and
feed.AtomId.get_value() ==
"urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", "Example 1 id")
entries = feed.Entry
self.assertTrue(
len(entries) == 1,
"Example 1: wrong number of entries (%i)" % len(entries))
entry = entries[0]
title = entry.Title
self.assertTrue(
isinstance(title, atom.Text) and title.get_value() ==
"Atom-Powered Robots Run Amok", "Example 1 entry title")
link = entry.Link[0]
self.assertTrue(isinstance(link, atom.Link) and link.href ==
"http://example.org/2003/12/13/atom03",
"Example 1 entry link")
self.assertTrue(
isinstance(entry.AtomId, atom.AtomId) and
entry.AtomId.get_value() ==
"urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
"Example 1 entry id")
updated = entry.Updated
self.assertTrue(isinstance(updated, atom.Date) and
updated.get_value() ==
iso8601.TimePoint.from_str("2003-12-13T18:30:02Z"),
"Example 1 entry updated")
summary = entry.Summary
self.assertTrue(isinstance(summary, atom.Text) and
summary.get_value() == "Some text.",
"Example 1 entry summary")
doc.read(src=BytesIO(EXAMPLE_2))
feed = doc.root
subtitle = feed.Subtitle
self.assertTrue(
isinstance(subtitle, atom.Subtitle) and
subtitle.type == atom.TextType.html and
subtitle.get_value().strip() ==
"A <em>lot</em> of effort went into making this effortless",
"Example 2 subtitle")
links = feed.Link
self.assertTrue(
links[0].rel == "alternate" and
links[0].type == "text/html" and
links[0].hreflang == "en" and
links[0].href == "http://example.org/",
"Example 2, link 0 attributes")
self.assertTrue(
links[1].rel == "self" and
links[1].type == "application/atom+xml" and
links[1].hreflang is None and
links[1].href == "http://example.org/feed.atom",
"Example 2, link 1 attributes")
rights = feed.Rights
self.assertTrue(
isinstance(rights, atom.Rights) and rights.get_value() ==
"Copyright (c) 2003, Mark Pilgrim", "Example 2, rights")
generator = feed.Generator
self.assertTrue(
isinstance(generator, atom.Generator) and
generator.uri == "http://www.example.com/" and
generator.version == "1.0" and
generator.get_value().strip() == "Example Toolkit",
"Example 2, generator")
"""<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
<href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
<href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>f8dy@example.com</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
        xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>"""
def test_constraint1(self):
"""TODO
* atom:feed elements MUST contain one or more atom:author elements,
unless all of the atom:feed element's child atom:entry elements
contain at least one atom:author element.
* atom:feed elements MUST NOT contain more than one atom:generator
element.
* atom:feed elements MUST NOT contain more than one atom:icon element.
* atom:feed elements MUST NOT contain more than one atom:logo element.
* atom:feed elements MUST contain exactly one atom:id element.
* atom:feed elements SHOULD contain one atom:link element with a rel
attribute value of "self". This is the preferred URI for retrieving
Atom Feed Documents representing this Atom feed.
* atom:feed elements MUST NOT contain more than one atom:link element
with a rel attribute value of "alternate" that has the same
combination of type and hreflang attribute values.
* atom:feed elements MAY contain additional atom:link elements beyond
those described above.
* atom:feed elements MUST NOT contain more than one atom:rights
element.
* atom:feed elements MUST NOT contain more than one atom:subtitle
element.
* atom:feed elements MUST contain exactly one atom:title element.
* atom:feed elements MUST contain exactly one atom:updated element."""
pass
class EntryTests(unittest.TestCase):
def setUp(self): # noqa
self.feed = atom.Feed(None)
def tearDown(self): # noqa
pass
def test_constructor(self):
entry = atom.Entry(None)
self.assertTrue(
isinstance(entry, atom.AtomElement), "Entry not an AtomElement")
self.assertTrue(
entry.get_base() is None, "xml:base present on construction")
self.assertTrue(
entry.get_lang() is None, "xml:lang present on construction")
attrs = entry.get_attributes()
self.assertTrue(sum(1 for k in dict_keys(attrs)) == 0,
"Attributes present on construction")
def test_constraints(self):
"""TODO
* atom:entry elements MUST contain one or more atom:author elements,
unless the atom:entry contains an atom:source element that contains
an atom:author element or, in an Atom Feed Document, the atom:feed
element contains an atom:author element itself.
* atom:entry elements MAY contain any number of atom:category elements.
* atom:entry elements MUST NOT contain more than one atom:content
element.
* atom:entry elements MAY contain any number of atom:contributor
elements.
* atom:entry elements MUST contain exactly one atom:id element.
* atom:entry elements that contain no child atom:content element MUST
contain at least one atom:link element with a rel attribute value of
"alternate".
* atom:entry elements MUST NOT contain more than one atom:link element
with a rel attribute value of "alternate" that has the same
combination of type and hreflang attribute values.
* atom:entry elements MAY contain additional atom:link elements beyond
those described above.
* atom:entry elements MUST NOT contain more than one atom:published
element.
* atom:entry elements MUST NOT contain more than one atom:rights
element.
* atom:entry elements MUST NOT contain more than one atom:source
element.
* atom:entry elements MUST contain an atom:summary element in either
of the following cases:
- the atom:entry contains an atom:content that has a "src"
attribute (and is thus empty).
- the atom:entry contains content that is encoded in Base64; i.e.,
the "type" attribute of atom:content is a MIME media type
[MIMEREG], but is not an XML media type [RFC3023], does not
begin with "text/", and does not end with "/xml" or "+xml".
* atom:entry elements MUST NOT contain more than one atom:summary
element.
* atom:entry elements MUST contain exactly one atom:title element.
* atom:entry elements MUST contain exactly one atom:updated element.
"""
pass
if __name__ == "__main__":
unittest.main()
| 39.668571
| 79
| 0.615289
|
794a62693f1665ee6672b50e7d20f40575d2c150
| 27,778
|
py
|
Python
|
prod-stack.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
prod-stack.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
prod-stack.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
import os
import sys
from troposphere import GetAtt, Output, Ref, Template, Sub, Base64
from troposphere.iam import Group, Policy, PolicyType, Role, InstanceProfile
from troposphere.sqs import Queue
from troposphere.dynamodb import Table, KeySchema, AttributeDefinition, \
ProvisionedThroughput
from troposphere.ecs import Cluster, TaskDefinition, ContainerDefinition, \
Service, Secret, Environment, DeploymentConfiguration, Volume, \
Host, MountPoint, PortMapping, ContainerDependency
from troposphere.ec2 import Instance, CreditSpecification, Tag, \
BlockDeviceMapping, EBSBlockDevice
from troposphere.cloudformation import Init, InitFile, InitFiles, \
InitConfig, InitService, Metadata
from troposphere.events import Rule, Target, EcsParameters
from troposphere.route53 import RecordSetType
ZONE_ID = os.environ.get('CKAN_ZONEID', False)
BOT_FQDN = 'netkan.ksp-ckan.space'
EMAIL = 'domains@ksp-ckan.space'
PARAM_NAMESPACE = '/NetKAN/Indexer/'
NETKAN_REMOTE = 'git@github.com:KSP-CKAN/NetKAN.git'
NETKAN_USER = 'KSP-CKAN'
NETKAN_REPO = 'NetKAN'
CKANMETA_REMOTE = 'git@github.com:KSP-CKAN/CKAN-meta.git'
CKANMETA_USER = 'KSP-CKAN'
CKANMETA_REPO = 'CKAN-meta'
STATUS_BUCKET = 'status.ksp-ckan.space'
status_key = 'status/netkan.json'
if not ZONE_ID:
print('Zone ID Required from EnvVar `CKAN_ZONEID`')
sys.exit()
t = Template()
t.set_description("Generate NetKAN Infrastructure CF Template")
# Inbound + Outbound SQS Queues
# Inbound: Scheduler Write, Inflation Read
# Outbound: Inflator Write, Indexer Read
inbound = t.add_resource(Queue("NetKANInbound",
QueueName="Inbound.fifo",
ReceiveMessageWaitTimeSeconds=20,
FifoQueue=True))
outbound = t.add_resource(Queue("NetKANOutbound",
QueueName="Outbound.fifo",
ReceiveMessageWaitTimeSeconds=20,
FifoQueue=True))
for queue in [inbound, outbound]:
t.add_output([
Output(
"{}QueueURL".format(queue.title),
Description="{} SQS Queue URL".format(queue.title),
Value=Ref(queue)
),
Output(
"{}QueueARN".format(queue.title),
Description="ARN of {} SQS Queue".format(queue.title),
Value=GetAtt(queue, "Arn")
),
])
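# A hedged illustration (not part of the stack definition): how a worker
# might drain one of these FIFO queues with boto3. The queue name matches the
# resources above; region/credentials are assumed to come from the
# environment.
def _example_receive_inbound():
    import boto3
    queue = boto3.resource('sqs').get_queue_by_name(QueueName='Inbound.fifo')
    for message in queue.receive_messages(WaitTimeSeconds=20):
        print(message.body)
        message.delete()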
# DynamoDB: NetKAN Status
netkan_db = t.add_resource(Table(
"NetKANStatus",
AttributeDefinitions=[
AttributeDefinition(
AttributeName="ModIdentifier",
AttributeType="S"
),
],
KeySchema=[
KeySchema(
AttributeName="ModIdentifier",
KeyType="HASH"
)
],
TableName="NetKANStatus",
ProvisionedThroughput=ProvisionedThroughput(
# The free tier allows for 25 R/W Capacity Units
# 5 allocated already for dev testing
ReadCapacityUnits=20,
WriteCapacityUnits=20
)
))
t.add_output(Output(
"TableName",
Value=Ref(netkan_db),
Description="Table name of the newly create DynamoDB table",
))
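# A hedged illustration (not part of the stack definition): reading one mod's
# row from the status table above; any attributes beyond the ModIdentifier
# hash key are assumptions.
def _example_get_status(identifier):
    import boto3
    table = boto3.resource('dynamodb').Table('NetKANStatus')
    return table.get_item(Key={'ModIdentifier': identifier}).get('Item')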
# Instance Role for Prod Indexing Instance to be able to
# access the relevant AWS resources. We can lock it all
# down to the container level, but this is unnecessary for
# now.
netkan_role = t.add_resource(Role(
"NetKANProdRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
ManagedPolicyArns=[
"arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
],
Policies=[
Policy(
PolicyName="SQSProdPolicy",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sqs:SendMessage",
"sqs:DeleteMessage",
"sqs:PurgeQueue",
"sqs:ReceiveMessage",
"sqs:GetQueueUrl",
"sqs:GetQueueAttributes",
],
"Resource": [
GetAtt(inbound, "Arn"),
GetAtt(outbound, "Arn")
]
},
{
"Effect": "Allow",
"Action": "sqs:ListQueues",
"Resource": "*",
},
],
}
),
Policy(
PolicyName="DynamoDBProdPolicy",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"dynamodb:DescribeTable",
"dynamodb:GetItem",
"dynamodb:Query",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"dynamodb:Scan",
"dynamodb:BatchWriteItem",
],
"Resource": [
GetAtt(netkan_db, "Arn")
]
},
{
"Effect": "Allow",
"Action": "dynamodb:ListTables",
"Resource": "*",
},
],
}
),
Policy(
PolicyName="S3StatusAccessProd",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket",
],
"Resource": [
"arn:aws:s3:::status.ksp-ckan.space/*"
]
},
],
}
),
Policy(
PolicyName="CertbotProd",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:GetChange"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets"
],
"Resource": [
"arn:aws:route53:::hostedzone/{}".format(
ZONE_ID
),
]
}
],
}
),
Policy(
PolicyName="AllowCloudWatchMetrics",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"cloudwatch:GetMetricStatistics",
],
"Effect": "Allow",
"Resource": "*"
}
]
}
),
Policy(
PolicyName="AllowWebhooksRestart",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecs:ListServices",
],
"Effect": "Allow",
"Resource": "*",
},
{
"Action": [
"ecs:DescribeServices",
],
"Effect": "Allow",
"Resource": Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('WebhooksService', 'Name'),
)
},
{
"Action": [
"ecs:UpdateService",
],
"Effect": "Allow",
"Resource": Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('WebhooksService', 'Name'),
)
},
]
}
)
]
))
netkan_profile = t.add_resource(InstanceProfile(
"NetKANProdProfile",
Roles=[Ref(netkan_role)]
))
# To access the Secrets Manager, the ECS agent needs the AssumeRole permission
# regardless of what the instance can access.
netkan_ecs_role = t.add_resource(Role(
"NetKANProdEcsRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
Policies=[
Policy(
PolicyName="AllowParameterAccess",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ssm:DescribeParameters"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ssm:GetParameters"
],
"Resource": Sub(
"arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter${ns}*",
ns=PARAM_NAMESPACE
)
}
]
}
)
]
))
# To be able to schedule tasks, the scheduler needs to be allowed to perform
# the tasks.
scheduler_resources = []
for task in [
'Scheduler', 'SchedulerWebhooksPass', 'CertBot', 'StatusDumper',
'DownloadCounter', 'TicketCloser', 'AutoFreezer']:
scheduler_resources.append(Sub(
'arn:aws:ecs:*:${AWS::AccountId}:task-definition/NetKANBot${Task}:*',
Task=task
))
netkan_scheduler_role = t.add_resource(Role(
"NetKANProdSchedulerRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "events.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
Policies=[
Policy(
PolicyName="AllowEcsTaskScheduling",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:RunTask"
],
"Resource": scheduler_resources,
"Condition": {
"ArnLike": {
"ecs:cluster": GetAtt('NetKANCluster', 'Arn')
}
}
},
{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": [
"*"
],
"Condition": {
"StringLike": {
"iam:PassedToService": "ecs-tasks.amazonaws.com"
}
}
}
]
}
)
]
))
# Build Account Permissions
# It's useful for the CI to be able to update services upon build, so
# there is a service account whose keys are exposed to CI to allow
# redeployment of services.
ksp_builder_group = t.add_resource(Group("KspCkanBuilderGroup"))
builder_services = []
for service in ['Indexer', 'Inflator', 'Webhooks']:
builder_services.append(
Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('{}Service'.format(service), 'Name'),
)
)
t.add_resource(PolicyType(
"KspCkanBuilderRole",
PolicyName="KspCkanBuilder",
Groups=[Ref(ksp_builder_group)],
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecs:ListServices",
],
"Effect": "Allow",
"Resource": "*",
},
{
"Action": [
"ecs:DescribeServices",
],
"Effect": "Allow",
"Resource": builder_services
},
{
"Action": [
"ecs:UpdateService",
],
"Effect": "Allow",
"Resource": builder_services
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
],
"Resource": [
"arn:aws:s3:::status.ksp-ckan.space/*"
],
},
]
}
))
# Indexer Compute
# We could utilise an autoscaling group, but that is way
# more complicated for our use case. If at some point we need
# to scale the service beyond a single instance (due to some
# infrastructure sponsorship), it wouldn't take more than
# adding an AutoScalingGroup + LoadBalancer.
netkan_ecs = t.add_resource(
Cluster('NetKANCluster', ClusterName='NetKANCluster')
)
netkan_userdata = Sub("""
#!/bin/bash -xe
echo ECS_CLUSTER=NetKANCluster > /etc/ecs/ecs.config
yum install -y aws-cfn-bootstrap
# Install the files and packages from the metadata
/opt/aws/bin/cfn-init -v --stack ${AWS::StackName} \
--resource NetKANCompute --region ${AWS::Region}
# ECS Volumes are a pain and I don't want to shave any more yaks
mkdir /mnt/letsencrypt
mkfs.ext4 -L CKANCACHE /dev/xvdh
mkdir -p /mnt/ckan_cache
echo "LABEL=CKANCACHE /mnt/ckan_cache ext4 defaults 0 2" >> /etc/fstab
mount -a
chown -R 1000:1000 /mnt/ckan_cache
# Docker doesn't see the new block device until restarted
service docker stop && service docker start
systemctl start ecs
# Start up the cfn-hup daemon to listen for changes
# to the metadata
/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'
# Signal the status from cfn-init
/opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} \
--resource NetKANCompute --region ${AWS::Region}
""")
cfn_hup = InitFile(
content=Sub(
"[main]\nstack=${AWS::StackId}\nregion=${AWS::Region}\n"
),
mode='000400',
owner='root',
group='root'
)
reloader = InitFile(
content=Sub("""
[cfn-auto-reloader-hook]
triggers=post.add, post.update
path=Resources.NetKANCompute.Metadata.AWS::CloudFormation::Init
action=/opt/aws/bin/cfn-init -s ${AWS::StackId} -r NetKANCompute --region ${AWS::Region}
runas=root
""")
)
docker = InitFile(
content="""
{
"log-driver": "json-file",
"log-opts": {
"max-size": "20m",
"max-file": "3"
}
}
""")
cfn_service = InitService(
enabled=True,
ensureRunning=True,
files=[
'/etc/cfn/cfn-hup.conf',
'/etc/cfn/hooks.d/cfn-auto-reloader.conf',
]
)
docker_service = InitService(
enabled=True,
ensureRunning=True,
files=['/etc/docker/daemon.json']
)
netkan_instance = Instance(
'NetKANCompute',
# ECS Optimised us-west-2
ImageId='ami-0e434a58221275ed4',
InstanceType='t3.micro',
IamInstanceProfile=Ref(netkan_profile),
KeyName='techman83_alucard',
SecurityGroups=['ckan-bot'],
UserData=Base64(netkan_userdata),
# t3 instances are unlimited by default
CreditSpecification=CreditSpecification(CPUCredits='standard'),
Tags=[
Tag(Key='Name', Value='NetKAN Indexer'),
Tag(Key='Service', Value='Indexer'),
],
Metadata=Metadata(Init({
'config': InitConfig(
files=InitFiles({
'/etc/cfn/cfn-hup.conf': cfn_hup,
'/etc/cfn/hooks.d/cfn-auto-reloader.conf': reloader,
'/etc/docker/daemon.json': docker,
})
),
'services': {
'sysvinit': {
'cfn': cfn_service,
'docker': docker_service,
}
},
})),
BlockDeviceMappings=[
BlockDeviceMapping(
DeviceName='/dev/xvdh',
Ebs=EBSBlockDevice(
VolumeSize='50',
VolumeType='standard',
)
)
]
)
t.add_resource(netkan_instance)
t.add_resource(RecordSetType(
"NetKANDns",
HostedZoneId=ZONE_ID,
Comment="NetKAN Bot DNS",
Name=BOT_FQDN,
Type="A",
TTL="900",
ResourceRecords=[GetAtt('NetKANCompute', "PublicIp")],
))
services = [
{
'name': 'Indexer',
'command': 'indexer',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token',
],
'env': [
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('CKANMETA_USER', CKANMETA_USER),
('CKANMETA_REPO', CKANMETA_REPO),
('SQS_QUEUE', GetAtt(outbound, 'QueueName')),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
},
{
'name': 'Scheduler',
'command': 'scheduler',
'memory': '156',
'secrets': ['SSH_KEY'],
'env': [
('SQS_QUEUE', GetAtt(inbound, 'QueueName')),
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'rate(2 hours)',
},
{
'name': 'SchedulerWebhooksPass',
'command': [
'scheduler', '--group', 'webhooks',
'--max-queued', '2000',
'--min-credits', '100'
],
'memory': '156',
'secrets': ['SSH_KEY'],
'env': [
('SQS_QUEUE', GetAtt(inbound, 'QueueName')),
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'rate(1 day)',
},
{
'name': 'CleanCache',
'command': [
'clean-cache',
'--days', '30',
],
'env': [],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
'schedule': 'rate(1 day)',
},
{
'name': 'Inflator',
'image': 'kspckan/inflator',
'memory': '156',
'secrets': ['GH_Token'],
'env': [
(
'QUEUES', Sub(
'${Inbound},${Outbound}',
Inbound=GetAtt(inbound, 'QueueName'),
Outbound=GetAtt(outbound, 'QueueName')
)
),
('AWS_REGION', Sub('${AWS::Region}')),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
]
},
{
'name': 'StatusDumper',
'command': 'export-status-s3',
'env': [
('STATUS_BUCKET', STATUS_BUCKET),
('STATUS_KEY', status_key),
('STATUS_INTERVAL', '0'),
],
'schedule': 'rate(5 minutes)',
},
{
'name': 'DownloadCounter',
'command': 'download-counter',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token',
],
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
],
'schedule': 'rate(1 day)',
},
{
'name': 'CertBot',
'image': 'certbot/dns-route53',
'command': [
'certonly', '-n', '--agree-tos', '--email',
EMAIL, '--dns-route53', '-d', BOT_FQDN
],
'volumes': [
('letsencrypt', '/etc/letsencrypt')
],
'schedule': 'cron(0 0 ? * MON *)',
},
# TODO: It'd be nice to detect a new cert, this'll do for now.
{
'name': 'RestartWebhooks',
'command': [
'redeploy-service',
'--cluster', 'NetKANCluster',
'--service-name', 'WebhooksService',
],
'env': [
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'cron(30 0 ? * MON *)',
},
{
'name': 'TicketCloser',
'command': 'ticket-closer',
'env': [],
'secrets': ['GH_Token'],
'schedule': 'rate(1 day)',
},
{
'name': 'AutoFreezer',
'command': 'auto-freezer',
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('NETKAN_USER', NETKAN_USER),
('NETKAN_REPO', NETKAN_REPO),
],
'secrets': [
'SSH_KEY', 'GH_Token',
],
'schedule': 'rate(7 days)',
},
{
'name': 'Webhooks',
'containers': [
{
'name': 'legacyhooks',
'image': 'kspckan/webhooks',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token', 'XKAN_GHSECRET',
'IA_access', 'IA_secret',
],
'env': [
('CKAN_meta', CKANMETA_REMOTE),
('NetKAN', NETKAN_REMOTE),
('IA_collection', 'kspckanmods'),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
},
{
'name': 'webhooks',
'entrypoint': '.local/bin/gunicorn',
'command': [
'-b', '0.0.0.0:5000', '--access-logfile', '-',
'netkan.webhooks:create_app()'
],
'secrets': [
'XKAN_GHSECRET', 'SSH_KEY',
],
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
('INFLATION_SQS_QUEUE', GetAtt(inbound, 'QueueName')),
],
},
{
'name': 'WebhooksProxy',
'image': 'kspckan/webhooks-proxy',
'ports': ['80', '443'],
'volumes': [
('letsencrypt', '/etc/letsencrypt')
],
'depends': ['webhooks', 'legacyhooks']
},
]
},
]
for service in services:
name = service['name']
schedule = service.get('schedule')
containers = service.get('containers', [service])
task = TaskDefinition(
'{}Task'.format(name),
ContainerDefinitions=[],
Family=Sub('${AWS::StackName}${name}', name=name),
ExecutionRoleArn=Ref(netkan_ecs_role),
Volumes=[],
DependsOn=[],
)
for container in containers:
secrets = [
'DISCORD_WEBHOOK_ID', 'DISCORD_WEBHOOK_TOKEN',
*container.get('secrets', [])
]
envs = container.get('env', [])
entrypoint = container.get('entrypoint')
command = container.get('command')
volumes = container.get('volumes', [])
ports = container.get('ports', [])
depends = container.get('depends', [])
definition = ContainerDefinition(
Image=container.get('image', 'kspckan/netkan'),
Memory=container.get('memory', '96'),
Name=container['name'],
Secrets=[
Secret(
Name=x,
ValueFrom='{}{}'.format(
PARAM_NAMESPACE, x
)
) for x in secrets
],
Environment=[
Environment(
Name=x[0], Value=x[1]
) for x in envs
],
MountPoints=[],
PortMappings=[],
DependsOn=[],
Links=[],
)
if entrypoint:
entrypoint = entrypoint if isinstance(entrypoint, list) else [entrypoint]
definition.EntryPoint = entrypoint
if command:
command = command if isinstance(command, list) else [command]
definition.Command = command
for volume in volumes:
volume_name = '{}{}'.format(
name,
''.join([i for i in volume[0].capitalize() if i.isalpha()])
)
task.Volumes.append(
Volume(
Name=volume_name,
Host=Host(
SourcePath=('/mnt/{}'.format(volume[0]))
)
)
)
definition.MountPoints.append(
MountPoint(
ContainerPath=volume[1],
SourceVolume=volume_name
)
)
for port in ports:
definition.PortMappings.append(
PortMapping(
ContainerPort=port,
HostPort=port,
Protocol='tcp',
)
)
for depend in depends:
definition.DependsOn.append(
ContainerDependency(
Condition='START',
ContainerName=depend,
)
)
definition.Links.append(depend)
task.ContainerDefinitions.append(definition)
t.add_resource(task)
if schedule:
target = Target(
Id="{}-Schedule".format(name),
Arn=GetAtt(netkan_ecs, 'Arn'),
RoleArn=GetAtt(netkan_scheduler_role, 'Arn'),
EcsParameters=EcsParameters(
TaskDefinitionArn=Ref(task)
)
)
t.add_resource(Rule(
'{}Rule'.format(name),
Description='{} scheduled task'.format(name),
ScheduleExpression=schedule,
Targets=[target],
))
continue
t.add_resource(Service(
'{}Service'.format(name),
Cluster='NetKANCluster',
DesiredCount=1,
TaskDefinition=Ref(task),
            # Allow for in-place service redeployments: with a single instance
            # the old task must stop (MinimumHealthyPercent=0) before the new
            # one can start (MaximumPercent=100).
DeploymentConfiguration=DeploymentConfiguration(
MaximumPercent=100,
MinimumHealthyPercent=0
),
DependsOn=['NetKANCluster']
))
print(t.to_yaml())
| 31.002232
| 108
| 0.4505
|
794a62bdae414f559d8fced1bd06ee5124e698d6
| 1,597
|
py
|
Python
|
Funny_Js_Crack/76-openlaw(RSA)/openlaw_login.py
|
qqizai/Func_Js_Crack
|
8cc8586107fecace4b71d0519cfbc760584171b1
|
[
"MIT"
] | 18
|
2020-12-09T06:49:46.000Z
|
2022-01-27T03:20:36.000Z
|
Funny_Js_Crack/76-openlaw(RSA)/openlaw_login.py
|
sumerzhang/Func_Js_Crack
|
8cc8586107fecace4b71d0519cfbc760584171b1
|
[
"MIT"
] | null | null | null |
Funny_Js_Crack/76-openlaw(RSA)/openlaw_login.py
|
sumerzhang/Func_Js_Crack
|
8cc8586107fecace4b71d0519cfbc760584171b1
|
[
"MIT"
] | 9
|
2020-12-20T08:52:09.000Z
|
2021-12-19T09:13:09.000Z
|
# -*- coding: utf-8 -*-
# @Time: 2019/12/17 10:54
# @Version: 1.0
# @Email: nnlcccc@outlook.com
# "Thousands of lines of code, tidiness first; sloppy code means two lines of tears while debugging"
import re
import requests
import execjs
userName = 'user'
password = 'password'
# Load the site's JS crypto routines so keyEncrypt() can be called from Python
with open('./openlaw_login.js', 'r', encoding='utf-8') as f:
    login_js = execjs.compile(f.read())
session = requests.session()
# Encrypt the plaintext password with the site's RSA keyEncrypt() routine
keyEncrypt_password = login_js.call('keyEncrypt', password)
login_url = 'http://openlaw(RSA).cn/login'
raw_headers = '''Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.9
Cache-Control: max-age=0
Connection: keep-alive
Content-Length: 526
Content-Type: application/x-www-form-urlencoded
Host: openlaw(RSA).cn
Origin: http://openlaw(RSA).cn
Referer: http://openlaw(RSA).cn/login.jsp?logout
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36'''
# Turn the browser-copied raw header block into a dict
headers = dict(line.split(": ", 1) for line in raw_headers.split("\n"))
# Fetch the login page first to pick up the CSRF token and session cookies
res = session.get('http://openlaw(RSA).cn/login.jsp?logout')
csrf = re.compile(r'csrf" value="(.*?)"')
formdata = {
'_csrf': csrf.search(res.text).group(1),
'username': userName,
'password': keyEncrypt_password,
'_spring_security_remember_me': 'true',
}
login_resp = session.post(login_url, headers=headers, data=formdata, timeout=10, allow_redirects=False)
# Verify the login by checking that the profile page shows our username
login_result = session.get('http://openlaw(RSA).cn/user/profile.jsp')
if userName in login_result.text:
    print('Login succeeded!')
| 31.94
| 149
| 0.720726
|
794a62d5979c4b293cda9759a1f59752d08d2e92
| 45,031
|
py
|
Python
|
python/tests/phonenumbermatchertest.py
|
vemel/python-phonenumbers
|
595c322bf12106a3b95e3f202e948a7c6b6c15b8
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:02:00.000Z
|
2021-02-16T10:02:00.000Z
|
python/tests/phonenumbermatchertest.py
|
vemel/python-phonenumbers
|
595c322bf12106a3b95e3f202e948a7c6b6c15b8
|
[
"Apache-2.0"
] | null | null | null |
python/tests/phonenumbermatchertest.py
|
vemel/python-phonenumbers
|
595c322bf12106a3b95e3f202e948a7c6b6c15b8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Unit tests for phonenumbermatcher.py"""
# Based on original Java code:
# java/test/com/google/i18n/phonenumbers/PhoneNumberMatchTest.java
# java/test/com/google/i18n/phonenumbers/PhoneNumberMatcherTest.java
# Copyright (C) 2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from phonenumbers import PhoneNumberMatch, PhoneNumberMatcher, Leniency
from phonenumbers import PhoneNumber, NumberFormat, phonenumberutil
from .testmetadatatest import TestMetadataTestCase
class PhoneNumberMatchTest(unittest.TestCase):
"""Tests the value type semantics for PhoneNumberMatch.
Equality must be based on the covered range and corresponding phone
number. Range and number correctness are tested by PhoneNumberMatcherTest.
"""
def setUp(self):
pass
def tearDown(self):
pass
def testValueTypeSemantics(self):
number = PhoneNumber()
match1 = PhoneNumberMatch(10, "1 800 234 45 67", number)
match2 = PhoneNumberMatch(10, "1 800 234 45 67", number)
match3 = PhoneNumberMatch(10, "1 801 234 45 67", number)
self.assertEqual(match1, match2)
self.assertEqual(match1.start, match2.start)
self.assertEqual(match1.end, match2.end)
self.assertEqual(match1.number, match2.number)
self.assertEqual(match1.raw_string, match2.raw_string)
self.assertEqual("1 800 234 45 67", match1.raw_string)
# Python-specific: check __ne__()
self.assertNotEqual(match1, match3)
self.assertTrue(match1 != match3)
# Python-specific: Check only comparisons of the same type work
self.assertNotEqual(match1, None)
self.assertNotEqual(match1, "")
self.assertNotEqual(match1, "1 800 234 45 67")
self.assertNotEqual(match1, 0)
def testIllegalArguments(self):
"""Tests the value type semantics for matches with a None number."""
try:
PhoneNumberMatch(-110, "1 800 234 45 67", PhoneNumber())
self.fail("Expected failed constructor")
except Exception:
pass
try:
PhoneNumberMatch(10, "1 800 234 45 67", None)
self.fail("Expected failed constructor")
except Exception:
pass
try:
PhoneNumberMatch(10, None, PhoneNumber())
self.fail("Expected failed constructor")
except Exception:
pass
try:
PhoneNumberMatch(10, None, None)
self.fail("Expected failed constructor")
except Exception:
pass
def testStringConvert(self):
"""Check string conversion"""
number = PhoneNumber()
match = PhoneNumberMatch(10, "1 800 234 45 67", number)
self.assertEqual("PhoneNumberMatch [10,25) 1 800 234 45 67", str(match))
# Python version extra test
self.assertEqual("PhoneNumberMatch(start=10, raw_string='1 800 234 45 67', " +
"numobj=PhoneNumber(country_code=None, national_number=None, extension=None, " +
"italian_leading_zero=False, country_code_source=None, preferred_domestic_carrier_code=None))", repr(match))
class NumberContext(object):
"""Small class that holds the context of the number we are testing
against. The test will insert the phone number to be found between
leadingText and trailingText."""
def __init__(self, leadingText, trailingText):
self.leadingText = leadingText
self.trailingText = trailingText
class NumberTest(object):
"""Small class that holds the number we want to test and the region for
which it should be valid."""
def __init__(self, rawString, region):
self.rawString = rawString
self.region = region
def __str__(self):
return "%s (%s)" % (self.rawString, self.region)
# Strings with number-like things that shouldn't be found under any level.
IMPOSSIBLE_CASES = [NumberTest("12345", "US"),
NumberTest("23456789", "US"),
NumberTest("234567890112", "US"),
NumberTest("650+253+1234", "US"),
NumberTest("3/10/1984", "CA"),
NumberTest("03/27/2011", "US"),
NumberTest("31/8/2011", "US"),
NumberTest("1/12/2011", "US"),
NumberTest("10/12/82", "DE"),
NumberTest("650x2531234", "US"),
NumberTest("2012-01-02 08:00", "US"),
NumberTest("2012/01/02 08:00", "US"),
NumberTest("20120102 08:00", "US"),
]
# Strings with number-like things that should only be found under "possible".
POSSIBLE_ONLY_CASES = [ # US numbers cannot start with 7 in the test metadata to be valid.
NumberTest("7121115678", "US"),
# 'X' should not be found in numbers at leniencies stricter than POSSIBLE, unless it represents
# a carrier code or extension.
NumberTest("1650 x 253 - 1234", "US"),
NumberTest("650 x 253 - 1234", "US"),
NumberTest("6502531x234", "US"),
NumberTest("(20) 3346 1234", "GB"), # Non-optional NP omitted
]
# Strings with number-like things that should only be found up to and
# including the "valid" leniency level.
VALID_CASES = [NumberTest("65 02 53 00 00", "US"),
NumberTest("6502 538365", "US"),
NumberTest("650//253-1234", "US"), # 2 slashes are illegal at higher levels
NumberTest("650/253/1234", "US"),
NumberTest("9002309. 158", "US"),
NumberTest("12 7/8 - 14 12/34 - 5", "US"),
NumberTest("12.1 - 23.71 - 23.45", "US"),
NumberTest("800 234 1 111x1111", "US"),
NumberTest("1979-2011 100", "US"),
NumberTest("+494949-4-94", "DE"), # National number in wrong format
NumberTest(u"\uFF14\uFF11\uFF15\uFF16\uFF16\uFF16\uFF16-\uFF17\uFF17\uFF17", "US"),
NumberTest("2012-0102 08", "US"), # Very strange formatting.
NumberTest("2012-01-02 08", "US"),
# Breakdown assistance number with unexpected formatting.
NumberTest("1800-1-0-10 22", "AU"),
NumberTest("030-3-2 23 12 34", "DE"),
NumberTest("03 0 -3 2 23 12 34", "DE"),
NumberTest("(0)3 0 -3 2 23 12 34", "DE"),
NumberTest("0 3 0 -3 2 23 12 34", "DE"),
]
# Strings with number-like things that should only be found up to and
# including the "strict_grouping" leniency level.
STRICT_GROUPING_CASES = [NumberTest("(415) 6667777", "US"),
NumberTest("415-6667777", "US"),
# Should be found by strict grouping but not exact
# grouping, as the last two groups are formatted
# together as a block.
NumberTest("0800-2491234", "DE"),
# Doesn't match any formatting in the test file, but
# almost matches an alternate format (the last two
# groups have been squashed together here).
NumberTest("0900-1 123123", "DE"),
NumberTest("(0)900-1 123123", "DE"),
NumberTest("0 900-1 123123", "DE"),
]
# Strings with number-like things that should be found at all levels.
# (A short usage sketch follows these tables.)
EXACT_GROUPING_CASES = [NumberTest(u"\uFF14\uFF11\uFF15\uFF16\uFF16\uFF16\uFF17\uFF17\uFF17\uFF17", "US"),
NumberTest(u"\uFF14\uFF11\uFF15-\uFF16\uFF16\uFF16-\uFF17\uFF17\uFF17\uFF17", "US"),
NumberTest("4156667777", "US"),
NumberTest("4156667777 x 123", "US"),
NumberTest("415-666-7777", "US"),
NumberTest("415/666-7777", "US"),
NumberTest("415-666-7777 ext. 503", "US"),
NumberTest("1 415 666 7777 x 123", "US"),
NumberTest("+1 415-666-7777", "US"),
NumberTest("+494949 49", "DE"),
NumberTest("+49-49-34", "DE"),
NumberTest("+49-4931-49", "DE"),
NumberTest("04931-49", "DE"), # With National Prefix
NumberTest("+49-494949", "DE"), # One group with country code
NumberTest("+49-494949 ext. 49", "DE"),
NumberTest("+49494949 ext. 49", "DE"),
NumberTest("0494949", "DE"),
NumberTest("0494949 ext. 49", "DE"),
NumberTest("01 (33) 3461 2234", "MX"), # Optional NP present
NumberTest("(33) 3461 2234", "MX"), # Optional NP omitted
NumberTest("1800-10-10 22", "AU"), # Breakdown assistance number.
# Doesn't match any formatting in the test file, but
# matches an alternate format exactly.
NumberTest("0900-1 123 123", "DE"),
NumberTest("(0)900-1 123 123", "DE"),
NumberTest("0 900-1 123 123", "DE"),
]
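
# Reading the tables above (a sketch, assuming the test metadata these tests
# load via TestMetadataTestCase): a VALID_CASES string such as "650//253-1234"
# is found at Leniency.VALID but rejected at stricter levels, e.g.
#   matcher = PhoneNumberMatcher("Call 650//253-1234", "US",
#                                leniency=Leniency.STRICT_GROUPING)
#   matcher.has_next()  # False here, but True with leniency=Leniency.VALID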
class PhoneNumberMatcherTest(TestMetadataTestCase):
"""Tests for PhoneNumberMatcher.
This only tests basic functionality based on test metadata. See
testphonenumberutil.py for the origin of the test data.
"""
# See PhoneNumberUtilTest.testParseNationalNumber().
def testFindNationalNumber(self):
# same cases as in testParseNationalNumber
self.doTestFindInContext("033316005", "NZ")
# self.doTestFindInContext("33316005", "NZ") is omitted since the
# national prefix is obligatory for these types of numbers in New Zealand.
# National prefix attached and some formatting present.
self.doTestFindInContext("03-331 6005", "NZ")
self.doTestFindInContext("03 331 6005", "NZ")
# Testing international prefixes.
# Should strip country code.
self.doTestFindInContext("0064 3 331 6005", "NZ")
# Try again, but this time we have an international number with Region
# Code US. It should recognize the country code and parse accordingly.
self.doTestFindInContext("01164 3 331 6005", "US")
self.doTestFindInContext("+64 3 331 6005", "US")
self.doTestFindInContext("64(0)64123456", "NZ")
# Check that using a "/" is fine in a phone number.
self.doTestFindInContext("123/45678", "DE")
self.doTestFindInContext("123-456-7890", "US")
# See PhoneNumberUtilTest.testParseWithInternationalPrefixes().
def testFindWithInternationalPrefixes(self):
self.doTestFindInContext("+1 (650) 333-6000", "NZ")
self.doTestFindInContext("1-650-333-6000", "US")
# Calling the US number from Singapore by using different service
# providers
# 1st test: calling using SingTel IDD service (IDD is 001)
self.doTestFindInContext("0011-650-333-6000", "SG")
# 2nd test: calling using StarHub IDD service (IDD is 008)
self.doTestFindInContext("0081-650-333-6000", "SG")
# 3rd test: calling using SingTel V019 service (IDD is 019)
self.doTestFindInContext("0191-650-333-6000", "SG")
# Calling the US number from Poland
self.doTestFindInContext("0~01-650-333-6000", "PL")
# Using "++" at the start.
self.doTestFindInContext("++1 (650) 333-6000", "PL")
# Using a full-width plus sign.
self.doTestFindInContext(u"\uFF0B1 (650) 333-6000", "SG")
# The whole number, including punctuation, is here represented in
# full-width form.
self.doTestFindInContext(u"\uFF0B\uFF11\u3000\uFF08\uFF16\uFF15\uFF10\uFF09" +
u"\u3000\uFF13\uFF13\uFF13\uFF0D\uFF16\uFF10\uFF10\uFF10",
"SG")
# See PhoneNumberUtilTest.testParseWithLeadingZero().
def testFindWithLeadingZero(self):
self.doTestFindInContext("+39 02-36618 300", "NZ")
self.doTestFindInContext("02-36618 300", "IT")
self.doTestFindInContext("312 345 678", "IT")
# See PhoneNumberUtilTest.testParseNationalNumberArgentina().
def testFindNationalNumberArgentina(self):
# Test parsing mobile numbers of Argentina.
self.doTestFindInContext("+54 9 343 555 1212", "AR")
self.doTestFindInContext("0343 15 555 1212", "AR")
self.doTestFindInContext("+54 9 3715 65 4320", "AR")
self.doTestFindInContext("03715 15 65 4320", "AR")
# Test parsing fixed-line numbers of Argentina.
self.doTestFindInContext("+54 11 3797 0000", "AR")
self.doTestFindInContext("011 3797 0000", "AR")
self.doTestFindInContext("+54 3715 65 4321", "AR")
self.doTestFindInContext("03715 65 4321", "AR")
self.doTestFindInContext("+54 23 1234 0000", "AR")
self.doTestFindInContext("023 1234 0000", "AR")
# See PhoneNumberUtilTest.testParseWithXInNumber().
def testFindWithXInNumber(self):
self.doTestFindInContext("(0xx) 123456789", "AR")
# A case where x denotes both carrier codes and extension symbol.
self.doTestFindInContext("(0xx) 123456789 x 1234", "AR")
# This test is intentionally constructed such that the number of digit
# after xx is larger than 7, so that the number won't be mistakenly
# treated as an extension, as we allow extensions up to 7 digits. This
# assumption is okay for now as all the countries where a carrier
# selection code is written in the form of xx have a national
# significant number of length larger than 7.
self.doTestFindInContext("011xx5481429712", "US")
# See PhoneNumberUtilTest.testParseNumbersMexico().
def testFindNumbersMexico(self):
# Test parsing fixed-line numbers of Mexico.
self.doTestFindInContext("+52 (449)978-0001", "MX")
self.doTestFindInContext("01 (449)978-0001", "MX")
self.doTestFindInContext("(449)978-0001", "MX")
# Test parsing mobile numbers of Mexico.
self.doTestFindInContext("+52 1 33 1234-5678", "MX")
self.doTestFindInContext("044 (33) 1234-5678", "MX")
self.doTestFindInContext("045 33 1234-5678", "MX")
# See PhoneNumberUtilTest.testParseNumbersWithPlusWithNoRegion().
def testFindNumbersWithPlusWithNoRegion(self):
# "ZZ" is allowed only if the number starts with a '+' - then the
# country code can be calculated.
self.doTestFindInContext("+64 3 331 6005", "ZZ")
# None is also allowed for the region code in these cases.
self.doTestFindInContext("+64 3 331 6005", None)
# See PhoneNumberUtilTest.testParseExtensions().
def testFindExtensions(self):
self.doTestFindInContext("03 331 6005 ext 3456", "NZ")
self.doTestFindInContext("03-3316005x3456", "NZ")
self.doTestFindInContext("03-3316005 int.3456", "NZ")
self.doTestFindInContext("03 3316005 #3456", "NZ")
self.doTestFindInContext("0~0 1800 7493 524", "PL")
self.doTestFindInContext("(1800) 7493.524", "US")
# Check that the last instance of an extension token is matched.
self.doTestFindInContext("0~0 1800 7493 524 ~1234", "PL")
# Verifying bug-fix where the last digit of a number was previously omitted if it was a 0 when
# extracting the extension. Also verifying a few different cases of extensions.
self.doTestFindInContext("+44 2034567890x456", "NZ")
self.doTestFindInContext("+44 2034567890x456", "GB")
self.doTestFindInContext("+44 2034567890 x456", "GB")
self.doTestFindInContext("+44 2034567890 X456", "GB")
self.doTestFindInContext("+44 2034567890 X 456", "GB")
self.doTestFindInContext("+44 2034567890 X 456", "GB")
self.doTestFindInContext("+44 2034567890 X 456", "GB")
self.doTestFindInContext("(800) 901-3355 x 7246433", "US")
self.doTestFindInContext("(800) 901-3355 , ext 7246433", "US")
self.doTestFindInContext("(800) 901-3355 ,extension 7246433", "US")
# The next test differs from phonenumberutil -> when matching we don't
# consider a lone comma to indicate an extension, although we accept
# it when parsing.
self.doTestFindInContext("(800) 901-3355 ,x 7246433", "US")
self.doTestFindInContext("(800) 901-3355 ext: 7246433", "US")
def testFindInterspersedWithSpace(self):
self.doTestFindInContext("0 3 3 3 1 6 0 0 5", "NZ")
# Test matching behavior when starting in the middle of a phone number.
def testIntermediateParsePositions(self):
text = "Call 033316005 or 032316005!"
        #       |    |    |    |    |    |
        #       0    5   10   15   20   25
# Iterate over all possible indices.
for ii in xrange(6):
self.assertEqualRange(text, ii, 5, 14)
# 7 and 8 digits in a row are still parsed as number.
self.assertEqualRange(text, 6, 6, 14)
self.assertEqualRange(text, 7, 7, 14)
# Anything smaller is skipped to the second instance.
for ii in xrange(8, 20):
self.assertEqualRange(text, ii, 19, 28)
def testMatchWithSurroundingZipcodes(self):
number = "415-666-7777"
zipPreceding = "My address is CA 34215 - " + number + " is my number."
expectedResult = phonenumberutil.parse(number, "US")
matcher = PhoneNumberMatcher(zipPreceding, "US")
if matcher.has_next():
match = matcher.next()
else:
match = None
self.assertTrue(match is not None,
msg="Did not find a number in '" + zipPreceding + "'; expected " + number)
self.assertEqual(expectedResult, match.number)
self.assertEqual(number, match.raw_string)
# Now repeat, but this time the phone number has spaces in it. It should still be found.
number = "(415) 666 7777"
zipFollowing = "My number is " + number + ". 34215 is my zip-code."
matcher = PhoneNumberMatcher(zipFollowing, "US")
if matcher.has_next():
matchWithSpaces = matcher.next()
else:
matchWithSpaces = None
self.assertTrue(matchWithSpaces is not None,
msg="Did not find a number in '" + zipFollowing + "'; expected " + number)
self.assertEqual(expectedResult, matchWithSpaces.number)
self.assertEqual(number, matchWithSpaces.raw_string)
def testIsLatinLetter(self):
self.assertTrue(PhoneNumberMatcher._is_latin_letter('c'))
self.assertTrue(PhoneNumberMatcher._is_latin_letter('C'))
self.assertTrue(PhoneNumberMatcher._is_latin_letter(u'\u00C9'))
self.assertTrue(PhoneNumberMatcher._is_latin_letter(u'\u0301')) # Combining acute accent
# Punctuation, digits and white-space are not considered "latin letters".
self.assertFalse(PhoneNumberMatcher._is_latin_letter(':'))
self.assertFalse(PhoneNumberMatcher._is_latin_letter('5'))
self.assertFalse(PhoneNumberMatcher._is_latin_letter('-'))
self.assertFalse(PhoneNumberMatcher._is_latin_letter('.'))
self.assertFalse(PhoneNumberMatcher._is_latin_letter(' '))
self.assertFalse(PhoneNumberMatcher._is_latin_letter(u'\u6211')) # Chinese character
self.assertFalse(PhoneNumberMatcher._is_latin_letter(u'\u306E')) # Hiragana letter no
def testMatchesWithSurroundingLatinChars(self):
possibleOnlyContexts = []
possibleOnlyContexts.append(NumberContext("abc", "def"))
possibleOnlyContexts.append(NumberContext("abc", ""))
possibleOnlyContexts.append(NumberContext("", "def"))
# Latin capital letter e with an acute accent.
possibleOnlyContexts.append(NumberContext(u"\u00C9", ""))
# e with an acute accent decomposed (with combining mark).
possibleOnlyContexts.append(NumberContext(u"e\u0301", ""))
# Numbers should not be considered valid, if they are surrounded by
# Latin characters, but should be considered possible.
self.findMatchesInContexts(possibleOnlyContexts, False, True)
def testMoneyNotSeenAsPhoneNumber(self):
possibleOnlyContexts = []
possibleOnlyContexts.append(NumberContext("$", ""))
possibleOnlyContexts.append(NumberContext("", "$"))
possibleOnlyContexts.append(NumberContext(u"\u00A3", "")) # Pound sign
possibleOnlyContexts.append(NumberContext(u"\u00A5", "")) # Yen sign
self.findMatchesInContexts(possibleOnlyContexts, False, True)
def testPercentageNotSeenAsPhoneNumber(self):
possibleOnlyContexts = []
possibleOnlyContexts.append(NumberContext("", "%"))
# Numbers followed by % should be dropped.
self.findMatchesInContexts(possibleOnlyContexts, False, True)
def testPhoneNumberWithLeadingOrTrailingMoneyMatches(self):
# Because of the space after the 20 (or before the 100) these dollar
# amounts should not stop the actual number from being found.
contexts = []
contexts.append(NumberContext("$20 ", ""))
contexts.append(NumberContext("", " 100$"))
self.findMatchesInContexts(contexts, True, True)
def testMatchesWithSurroundingLatinCharsAndLeadingPunctuation(self):
# Contexts with trailing characters. Leading characters are okay here
# since the numbers we will insert start with punctuation, but
# trailing characters are still not allowed.
possibleOnlyContexts = []
possibleOnlyContexts.append(NumberContext("abc", "def"))
possibleOnlyContexts.append(NumberContext("", "def"))
possibleOnlyContexts.append(NumberContext("", u"\u00C9"))
# Numbers should not be considered valid, if they have trailing Latin
# characters, but should be considered possible.
numberWithPlus = "+14156667777"
numberWithBrackets = "(415)6667777"
self.findMatchesInContexts(possibleOnlyContexts, False, True, "US", numberWithPlus)
self.findMatchesInContexts(possibleOnlyContexts, False, True, "US", numberWithBrackets)
validContexts = []
validContexts.append(NumberContext("abc", ""))
validContexts.append(NumberContext(u"\u00C9", ""))
validContexts.append(NumberContext(u"\u00C9", ".")) # Trailing punctuation.
validContexts.append(NumberContext(u"\u00C9", " def")) # Trailing white-space.
# Numbers should be considered valid, since they start with punctuation.
self.findMatchesInContexts(validContexts, True, True, "US", numberWithPlus)
self.findMatchesInContexts(validContexts, True, True, "US", numberWithBrackets)
def testMatchesWithSurroundingChineseChars(self):
validContexts = []
validContexts.append(NumberContext(u"\u6211\u7684\u7535\u8BDD\u53F7\u7801\u662F", ""))
validContexts.append(NumberContext("", u"\u662F\u6211\u7684\u7535\u8BDD\u53F7\u7801"))
validContexts.append(NumberContext(u"\u8BF7\u62E8\u6253", u"\u6211\u5728\u660E\u5929"))
# Numbers should be considered valid, since they are surrounded by Chinese.
self.findMatchesInContexts(validContexts, True, True)
def testMatchesWithSurroundingPunctuation(self):
validContexts = []
validContexts.append(NumberContext("My number-", "")) # At end of text.
validContexts.append(NumberContext("", ".Nice day.")) # At start of text.
validContexts.append(NumberContext("Tel:", ".")) # Punctuation surrounds number.
validContexts.append(NumberContext("Tel: ", " on Saturdays.")) # White-space is also fine.
# Numbers should be considered valid, since they are surrounded by punctuation.
self.findMatchesInContexts(validContexts, True, True)
def testMatchesMultiplePhoneNumbersSeparatedByPhoneNumberPunctuation(self):
text = "Call 650-253-4561 -- 455-234-3451"
region = "US"
number1 = PhoneNumber(country_code=phonenumberutil.country_code_for_region(region),
national_number=6502534561L)
match1 = PhoneNumberMatch(5, "650-253-4561", number1)
number2 = PhoneNumber(country_code=phonenumberutil.country_code_for_region(region),
national_number=4552343451L)
match2 = PhoneNumberMatch(21, "455-234-3451", number2)
matches = PhoneNumberMatcher(text, region)
self.assertEqual(match1, matches.next())
self.assertEqual(match2, matches.next())
def testDoesNotMatchMultiplePhoneNumbersSeparatedWithNoWhiteSpace(self):
# No white-space found between numbers - neither is found.
text = "Call 650-253-4561--455-234-3451"
region = "US"
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(text, region)))
def testMatchesWithPossibleLeniency(self):
testCases = STRICT_GROUPING_CASES + EXACT_GROUPING_CASES + VALID_CASES + POSSIBLE_ONLY_CASES
self._doTestNumberMatchesForLeniency(testCases, Leniency.POSSIBLE)
def testNonMatchesWithPossibleLeniency(self):
testCases = IMPOSSIBLE_CASES
self._doTestNumberNonMatchesForLeniency(testCases, Leniency.POSSIBLE)
def testMatchesWithValidLeniency(self):
testCases = STRICT_GROUPING_CASES + EXACT_GROUPING_CASES + VALID_CASES
self._doTestNumberMatchesForLeniency(testCases, Leniency.VALID)
def testNonMatchesWithValidLeniency(self):
testCases = IMPOSSIBLE_CASES + POSSIBLE_ONLY_CASES
self._doTestNumberNonMatchesForLeniency(testCases, Leniency.VALID)
def testMatchesWithStrictGroupingLeniency(self):
testCases = STRICT_GROUPING_CASES + EXACT_GROUPING_CASES
self._doTestNumberMatchesForLeniency(testCases, Leniency.STRICT_GROUPING)
def testNonMatchesWithStrictGroupLeniency(self):
testCases = IMPOSSIBLE_CASES + POSSIBLE_ONLY_CASES + VALID_CASES
self._doTestNumberNonMatchesForLeniency(testCases, Leniency.STRICT_GROUPING)
def testMatchesWithExactGroupingLeniency(self):
testCases = EXACT_GROUPING_CASES
self._doTestNumberMatchesForLeniency(testCases, Leniency.EXACT_GROUPING)
def testNonMatchesExactGroupLeniency(self):
testCases = IMPOSSIBLE_CASES + POSSIBLE_ONLY_CASES + VALID_CASES + STRICT_GROUPING_CASES
self._doTestNumberNonMatchesForLeniency(testCases, Leniency.EXACT_GROUPING)
def _doTestNumberMatchesForLeniency(self, testCases, leniency):
noMatchFoundCount = 0
wrongMatchFoundCount = 0
for test in testCases:
iterator = self.findNumbersForLeniency(test.rawString, test.region, leniency)
if iterator.has_next():
match = iterator.next()
else:
match = None
if match is None:
noMatchFoundCount += 1
print >> sys.stderr, "No match found in %s for leniency: %s" % (test, leniency)
else:
if test.rawString != match.raw_string:
wrongMatchFoundCount += 1
print >> sys.stderr, "Found wrong match in test %s. Found %s" % (test, match)
self.assertEqual(0, noMatchFoundCount)
self.assertEqual(0, wrongMatchFoundCount)
def _doTestNumberNonMatchesForLeniency(self, testCases, leniency):
matchFoundCount = 0
for test in testCases:
iterator = self.findNumbersForLeniency(test.rawString, test.region, leniency)
if iterator.has_next():
match = iterator.next()
else:
match = None
if match is not None:
matchFoundCount += 1
print >> sys.stderr, "Match found in %s for leniency: %s" % (test, leniency)
self.assertEqual(0, matchFoundCount)
def findMatchesInContexts(self, contexts, isValid, isPossible,
region="US", number="415-666-7777"):
"""Helper method which tests the contexts provided and ensures
that:
- if isValid is True, they all find a test number inserted in the
middle when leniency of matching is set to VALID; else no test
number should be extracted at that leniency level
- if isPossible is True, they all find a test number inserted in the
middle when leniency of matching is set to POSSIBLE; else no test
number should be extracted at that leniency level"""
if isValid:
self.doTestInContext(number, region, contexts, Leniency.VALID)
else:
for context in contexts:
text = context.leadingText + number + context.trailingText
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(text, region)),
msg="Should not have found a number in " + text)
if isPossible:
self.doTestInContext(number, region, contexts, Leniency.POSSIBLE)
else:
for context in contexts:
text = context.leadingText + number + context.trailingText
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(text, region,
leniency=Leniency.POSSIBLE, max_tries=sys.maxint)),
msg="Should not have found a number in " + text)
def testNonMatchingBracketsAreInvalid(self):
# The digits up to the ", " form a valid US number, but it shouldn't
# be matched as one since there was a non-matching bracket present.
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("80.585 [79.964, 81.191]", "US")))
# The trailing "]" is thrown away before parsing, so the resultant
# number, while a valid US number, does not have matching brackets.
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("80.585 [79.964]", "US")))
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("80.585 ((79.964)", "US")))
# This case has too many sets of brackets to be valid.
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("(80).(585) (79).(9)64", "US")))
def testNoMatchIfRegionIsNone(self):
# Fail on non-international prefix if region code is None.
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("Random text body - number is 0331 6005, see you there", None)))
def testNoMatchInEmptyString(self):
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("", "US")))
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(" ", "US")))
def testNoMatchIfNoNumber(self):
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("Random text body - number is foobar, see you there", "US")))
def testSequences(self):
# Test multiple occurrences.
text = "Call 033316005 or 032316005!"
region = "NZ"
number1 = PhoneNumber()
number1.country_code = phonenumberutil.country_code_for_region(region)
number1.national_number = 33316005
match1 = PhoneNumberMatch(5, "033316005", number1)
number2 = PhoneNumber()
number2.country_code = phonenumberutil.country_code_for_region(region)
number2.national_number = 32316005
match2 = PhoneNumberMatch(19, "032316005", number2)
matcher = PhoneNumberMatcher(text, region, Leniency.POSSIBLE, sys.maxint)
self.assertEqual(match1, matcher.next())
self.assertEqual(match2, matcher.next())
self.assertFalse(matcher.has_next())
def testNoneInput(self):
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(None, "US")))
self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(None, None)))
def testMaxMatches(self):
# Set up text with 100 valid phone numbers.
numbers = "My info: 415-666-7777," * 100
# Matches all 100. Max only applies to failed cases.
number = phonenumberutil.parse("+14156667777", None)
expected = [number] * 100
matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
actual = [x.number for x in matcher]
self.assertEqual(expected, actual)
def testMaxMatchesInvalid(self):
# Set up text with 10 invalid phone numbers followed by 100 valid.
numbers = (("My address 949-8945-0" * 10) +
("My info: 415-666-7777," * 100))
matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
self.assertFalse(matcher.has_next())
def testMaxMatchesMixed(self):
# Set up text with 100 valid numbers inside an invalid number.
numbers = "My info: 415-666-7777 123 fake street" * 100
# Only matches the first 10 despite there being 100 numbers due to max matches.
number = phonenumberutil.parse("+14156667777", None)
expected = [number] * 10
matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
actual = [x.number for x in matcher]
self.assertEqual(expected, actual)
def testNonPlusPrefixedNumbersNotFoundForInvalidRegion(self):
# Does not start with a "+", we won't match it.
matcher = PhoneNumberMatcher("1 456 764 156", "ZZ")
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the Iterator contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
def testEmptyIteration(self):
matcher = PhoneNumberMatcher("", "ZZ")
self.assertFalse(matcher.has_next())
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the iterator contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
def testSingleIteration(self):
matcher = PhoneNumberMatcher("+14156667777", "ZZ")
# With hasNext() -> next().
# Double hasNext() to ensure it does not advance.
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
# With next() only.
matcher = PhoneNumberMatcher("+14156667777", "ZZ")
self.assertTrue(matcher.next() is not None)
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
def testDoubleIteration(self):
matcher = PhoneNumberMatcher("+14156667777 foobar +14156667777 ", "ZZ")
# With hasNext() -> next().
# Double hasNext() to ensure it does not advance.
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
# With next() only.
matcher = PhoneNumberMatcher("+14156667777 foobar +14156667777 ", "ZZ")
self.assertTrue(matcher.next() is not None)
self.assertTrue(matcher.next() is not None)
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
def assertEqualRange(self, text, index, start, end):
"""Asserts that another number can be found in text starting at index, and that
its corresponding range is [start, end).
"""
sub = text[index:]
matcher = PhoneNumberMatcher(sub, "NZ", Leniency.POSSIBLE, sys.maxint)
self.assertTrue(matcher.has_next())
match = matcher.next()
self.assertEqual(start - index, match.start)
self.assertEqual(end - index, match.end)
self.assertEqual(sub[match.start:match.end], match.raw_string)
def doTestFindInContext(self, number, defaultCountry):
"""Tests numbers found by PhoneNumberMatcher in various textual contexts"""
self.findPossibleInContext(number, defaultCountry)
parsed = phonenumberutil.parse(number, defaultCountry)
if phonenumberutil.is_valid_number(parsed):
self.findValidInContext(number, defaultCountry)
def findPossibleInContext(self, number, defaultCountry):
"""Tests valid numbers in contexts that should pass for Leniency.POSSIBLE"""
contextPairs = [NumberContext("", ""), # no context
NumberContext(" ", "\t"), # whitespace only
NumberContext("Hello ", ""), # no context at end
NumberContext("", " to call me!"), # no context at start
NumberContext("Hi there, call ", " to reach me!"), # no context at start
NumberContext("Hi there, call ", ", or don't"), # with commas
# Three examples without whitespace around the number.
NumberContext("Hi call", ""),
NumberContext("", "forme"),
NumberContext("Hi call", "forme"),
# With other small numbers.
NumberContext("It's cheap! Call ", " before 6:30"),
# With a second number later.
NumberContext("Call ", " or +1800-123-4567!"),
NumberContext("Call me on June 2 at", ""), # with a Month-Day date
# With publication pages.
NumberContext("As quoted by Alfonso 12-15 (2009), you may call me at ", ""),
NumberContext("As quoted by Alfonso et al. 12-15 (2009), you may call me at ", ""),
# With dates, written in the American style.
NumberContext("As I said on 03/10/2011, you may call me at ", ""),
# With trailing numbers after a comma. The 45 should not be considered an extension.
NumberContext("", ", 45 days a year"),
# With a postfix stripped off as it looks like the start of another number.
NumberContext("Call ", "/x12 more"),
]
self.doTestInContext(number, defaultCountry, contextPairs, Leniency.POSSIBLE)
def findValidInContext(self, number, defaultCountry):
"""Tests valid numbers in contexts that fail for Leniency.POSSIBLE but
are valid for Leniency.VALID."""
contextPairs = [
# With other small numbers.
NumberContext("It's only 9.99! Call ", " to buy"),
# With a number Day.Month.Year date.
NumberContext("Call me on 21.6.1984 at ", ""),
# With a number Month/Day date.
NumberContext("Call me on 06/21 at ", ""),
# With a number Day.Month date.
NumberContext("Call me on 21.6. at ", ""),
# With a number Month/Day/Year date.
NumberContext("Call me on 06/21/84 at ", ""),
]
self.doTestInContext(number, defaultCountry, contextPairs, Leniency.VALID)
def doTestInContext(self, number, defaultCountry, contextPairs, leniency):
for context in contextPairs:
prefix = context.leadingText
text = prefix + number + context.trailingText
start = len(prefix)
end = start + len(number)
matcher = PhoneNumberMatcher(text, defaultCountry, leniency, sys.maxint)
if matcher.has_next():
match = matcher.next()
else:
match = None
self.assertTrue(match is not None,
msg="Did not find a number in '" + text + "'; expected '" + number + "'")
extracted = text[match.start:match.end]
self.assertEqual(start, match.start,
msg="Unexpected phone region in '" + text + "'; extracted '" + extracted + "'")
self.assertEqual(end, match.end,
msg="Unexpected phone region in '" + text + "'; extracted '" + extracted + "'")
self.assertEqual(number, extracted)
self.assertEqual(match.raw_string, extracted)
self.ensureTermination(text, defaultCountry, leniency)
# Exhaustively searches for phone numbers from each index within text to
# test that finding matches always terminates.
def ensureTermination(self, text, defaultCountry, leniency):
for index in xrange(len(text) + 1):
sub = text[index:]
matches = ""
# Iterates over all matches.
for match in PhoneNumberMatcher(sub, defaultCountry, leniency, sys.maxint):
matches += ", " + str(match)
def findNumbersForLeniency(self, text, defaultCountry, leniency):
return PhoneNumberMatcher(text, defaultCountry, leniency, sys.maxint)
def hasNoMatches(self, matcher):
"""Returns True if there were no matches found."""
return not matcher.has_next()
def testDoubleExtensionX(self):
# Python version extra test - multiple x for extension marker
xx_ext = "800 234 1 111 xx 1111"
# This gives different results for different leniency values (and so
# can't be used in a NumberTest).
m0 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.POSSIBLE).next()
self.assertEqual(xx_ext, m0.raw_string)
m1 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.VALID).next()
self.assertEqual("800 234 1 111", m1.raw_string)
matcher2 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.STRICT_GROUPING)
self.assertFalse(matcher2.has_next())
def testInternals(self):
# Python-specific test: coverage of internals
from phonenumbers.phonenumbermatcher import _limit, _verify, _is_national_prefix_present_if_required, _get_national_number_groups
from phonenumbers import CountryCodeSource
self.assertEqual("{1,2}", _limit(1, 2))
self.assertRaises(Exception, _limit, *(-1, 2))
self.assertRaises(Exception, _limit, *(1, 0))
self.assertRaises(Exception, _limit, *(2, 1))
number = PhoneNumber(country_code=44, national_number=7912345678L)
self.assertRaises(Exception, _verify, *(99, number, "12345678"))
self.assertRaises(ValueError, PhoneNumberMatcher, *("text", "US"), **{"leniency": None})
self.assertRaises(ValueError, PhoneNumberMatcher, *("text", "US"), **{"max_tries": -2})
# Invalid country looks like national prefix is present (no way to tell)
number2 = PhoneNumber(country_code=99, national_number=12345678L, country_code_source=CountryCodeSource.FROM_DEFAULT_COUNTRY)
self.assertTrue(_is_national_prefix_present_if_required(number2))
# National prefix rule has no lead digits
number3 = PhoneNumber(country_code=61, national_number=1234567890L, country_code_source=CountryCodeSource.FROM_DEFAULT_COUNTRY)
self.assertTrue(_is_national_prefix_present_if_required(number3))
# Coverage for _get_national_number_groups() with a formatting pattern provided
us_number = PhoneNumber(country_code=1, national_number=6502530000L)
num_format = NumberFormat(pattern="(\\d{3})(\\d{3})(\\d{4})", format="\\1-\\2-\\3")
self.assertEqual(["650", "253", "0000"],
_get_national_number_groups(us_number, num_format))
| 48.893594
| 137
| 0.629078
|
794a6438a733b35e720bd6d8c6e691a41d833fcc
| 2,914
|
py
|
Python
|
jdcloud_cli/controllers/websocket/attach_request.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 95
|
2018-06-05T10:49:32.000Z
|
2019-12-31T11:07:36.000Z
|
jdcloud_cli/controllers/websocket/attach_request.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 22
|
2018-06-05T10:58:59.000Z
|
2020-07-31T12:13:19.000Z
|
jdcloud_cli/controllers/websocket/attach_request.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 21
|
2018-06-04T12:50:27.000Z
|
2020-11-05T10:55:28.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jdcloud_sdk.core.signer import Signer
from jdcloud_sdk.core.credential import Credential
from jdcloud_cli.utils import encode_jdcloud_headers
from jdcloud_cli.config import ProfileManager
from jdcloud_cli.const import WEBSOCKET_SCHEME, METHOD_GET
from jdcloud_cli.logger import get_logger
from jdcloud_cli.controllers.websocket.resize_tty_request import resize_tty
from jdcloud_cli.controllers.websocket.websocket_base import web_socket
class AttachRequest(object):
def __init__(self, service, scheme, endpoint, method, headers, region_id, container_id, pod_id=None):
url_map = {
'pod': '%s://%s/v1/regions/%s/pods/%s/containers/%s:attach' % (scheme, endpoint, region_id, pod_id, container_id),
'nc': '%s://%s/v1/regions/%s/containers/%s:attach' % (scheme, endpoint, region_id, container_id),
'nativecontainer': '%s://%s/v1/regions/%s/containers/%s:attach' % (scheme, endpoint, region_id, container_id)
}
self.__url = url_map[service]
self.__method = method
self.__region_id = region_id
self.__service = service
self.__headers = {'content-type': 'application/json'}
if headers is not None:
self.__headers.update(headers)
encode_jdcloud_headers(self.__headers)
def invoke_shell(self, credential):
        signer = Signer(get_logger(False))
        signer.sign(self.__method, self.__service, self.__region_id, self.__url, self.__headers, '', credential, '')
web_socket.invoke_shell(self.__url, self.__headers)
def attach(app, service, headers, region_id, container_id, pod_id=None):
def handle_signal(signum, frame):
h, w = web_socket.get_win_size()
resize_tty(h, w, app, service, headers, region_id, container_id, pod_id=pod_id)
web_socket.reg_winch_handler(handle_signal)
profile_manager = ProfileManager()
cli_config = profile_manager.load_current_profile()
credential = Credential(cli_config.access_key, cli_config.secret_key)
request = AttachRequest(service, WEBSOCKET_SCHEME, cli_config.endpoint, METHOD_GET, headers, region_id, container_id, pod_id=pod_id)
request.invoke_shell(credential)
h_o, w_o = web_socket.get_win_size()
resize_tty(h_o, w_o, app, service, headers, region_id, container_id, pod_id=pod_id)
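
# Hypothetical usage (values are illustrative, not from the source): a CLI
# command handler could call
#     attach(app, 'nativecontainer', None, 'cn-north-1', '<container-id>')
# to open an interactive session; the SIGWINCH handler registered in attach()
# keeps the remote TTY sized to match the local terminal.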
| 44.151515
| 136
| 0.734386
|
794a64623e1858e685dc62057737c7a660c60a99
| 5,964
|
py
|
Python
|
basic_transactions_gp/blockchain.py
|
Robdowski/Blockchain
|
06c35ee51eb38733e87540abd1633b2045105eeb
|
[
"MIT"
] | null | null | null |
basic_transactions_gp/blockchain.py
|
Robdowski/Blockchain
|
06c35ee51eb38733e87540abd1633b2045105eeb
|
[
"MIT"
] | null | null | null |
basic_transactions_gp/blockchain.py
|
Robdowski/Blockchain
|
06c35ee51eb38733e87540abd1633b2045105eeb
|
[
"MIT"
] | null | null | null |
# Paste your version of blockchain.py from the basic_block_gp
# folder here
import hashlib
import json
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request
from flask_cors import CORS
class Blockchain(object):
def __init__(self):
self.chain = []
self.current_transactions = []
# Create the genesis block
self.new_block(previous_hash=1, proof=100)
def new_transaction(self, sender, recipient, amount):
self.current_transactions.append({
'sender': sender,
'recipient': recipient,
'amount': amount,
})
return self.last_block['index'] + 1
def new_block(self, proof, previous_hash=None):
"""
Create a new Block in the Blockchain
A block should have:
* Index
* Timestamp
* List of current transactions
* The proof used to mine this block
* The hash of the previous block
:param proof: <int> The proof given by the Proof of Work algorithm
:param previous_hash: (Optional) <str> Hash of previous Block
:return: <dict> New Block
"""
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.last_block)
}
# Reset the current list of transactions
self.current_transactions = []
        # Append the block to the chain
self.chain.append(block)
# Return the new block
return block
def hash(self, block):
"""
Creates a SHA-256 hash of a Block
:param block": <dict> Block
"return": <str>
"""
        # Use json.dumps to convert the block dict into a string. We must
        # make sure the dictionary is ordered (sort_keys=True), or we'll
        # have inconsistent hashes.
        string_block = json.dumps(block, sort_keys=True)
        # hashlib.sha256 requires a `bytes-like` object, which is what
        # .encode() provides by converting the string into a byte string.
        raw_hash = hashlib.sha256(string_block.encode())
        # By itself, sha256 returns the hash as a raw digest that is hard
        # to read; .hexdigest() converts it to a string of hexadecimal
        # characters, which is easier to work with and understand.
        hex_hash = raw_hash.hexdigest()
        return hex_hash
@property
def last_block(self):
return self.chain[-1]
@staticmethod
def valid_proof(block_string, proof):
"""
        Validates the Proof: Does hash(block_string + proof) contain 6
        leading zeroes? Return true if the proof is valid
:param block_string: <string> The stringified block to use to
check in combination with `proof`
:param proof: <int?> The value that when combined with the
stringified previous block results in a hash that has the
correct number of leading zeroes.
:return: True if the resulting hash is a valid proof, False otherwise
"""
guess = f'{block_string}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:6] == "000000"
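

# Helper sketch (an assumption, not part of the original assignment): clients
# of the POST /mine route below need a proof to submit as {'proof': ..., 'id': ...}.
# This brute-forces one against the last block using the same valid_proof()
# check the server runs.
def proof_of_work(last_block):
    """Increment `proof` until valid_proof(block_string, proof) passes."""
    block_string = json.dumps(last_block, sort_keys=True)
    proof = 0
    while not Blockchain.valid_proof(block_string, proof):
        proof += 1
    return proof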
# Instantiate our Node
app = Flask(__name__)
CORS(app)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['POST'])
def mine():
values = request.get_json()
required = ['proof', 'id']
if not all(k in values for k in required):
response = {'message': "Missing values"}
return jsonify(response), 400
submitted_proof = values['proof']
block_string = json.dumps(blockchain.last_block, sort_keys=True)
if blockchain.valid_proof(block_string, submitted_proof):
blockchain.new_transaction('0',
values['id'],
1)
# Forge the new Block by adding it to the chain with the proof
previous_hash = blockchain.hash(blockchain.last_block)
block = blockchain.new_block(submitted_proof, previous_hash)
response = {
"Message": "Success"
}
return jsonify(response), 200
else:
response = {
"Message": "Proof was invalid or late."
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def receive_transaction():
values = request.get_json()
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
response = {'Message': "Missing values"}
return jsonify(response), 400
else:
index = blockchain.new_transaction(values['sender'],
values['recipient'],
values['amount']
)
response = {"Message": f"Transaction will be added to block {index}"}
return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
        # Return the chain and its current length
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
@app.route('/last_block', methods=['GET'])
def return_last_block():
response = {
'last_block': blockchain.last_block
}
return jsonify(response), 200
# Run the program on port 5000
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000)
| 29.37931
| 77
| 0.605634
|
794a648e11b52c4650243e7c7c8dfae080ce018c
| 295
|
py
|
Python
|
EmployeeApp/urls.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-MelaatiJ
|
50d53ac2d2ba2f305687898e1686ef23d945fd1d
|
[
"Apache-2.0"
] | null | null | null |
EmployeeApp/urls.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-MelaatiJ
|
50d53ac2d2ba2f305687898e1686ef23d945fd1d
|
[
"Apache-2.0"
] | null | null | null |
EmployeeApp/urls.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-MelaatiJ
|
50d53ac2d2ba2f305687898e1686ef23d945fd1d
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
# paths to three pages
urlpatterns = [
path("", views.index, name="index"),
path("apply/", views.apply, name="apply"),
path("applicant/", views.applicant, name="applicant")
# path("applicant/", views.applicant, name="applicant"),
]
| 26.818182
| 60
| 0.664407
|
794a6608ff9894b41446fa4f3ac2cfc8e09c3a12
| 11,657
|
py
|
Python
|
huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_product_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_product_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/update_product_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateProductResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'app_id': 'str',
'app_name': 'str',
'product_id': 'str',
'name': 'str',
'device_type': 'str',
'protocol_type': 'str',
'data_format': 'str',
'manufacturer_name': 'str',
'industry': 'str',
'description': 'str',
'service_capabilities': 'list[ServiceCapability]',
'create_time': 'str'
}
attribute_map = {
'app_id': 'app_id',
'app_name': 'app_name',
'product_id': 'product_id',
'name': 'name',
'device_type': 'device_type',
'protocol_type': 'protocol_type',
'data_format': 'data_format',
'manufacturer_name': 'manufacturer_name',
'industry': 'industry',
'description': 'description',
'service_capabilities': 'service_capabilities',
'create_time': 'create_time'
}
def __init__(self, app_id=None, app_name=None, product_id=None, name=None, device_type=None, protocol_type=None, data_format=None, manufacturer_name=None, industry=None, description=None, service_capabilities=None, create_time=None):
"""UpdateProductResponse - a model defined in huaweicloud sdk"""
super(UpdateProductResponse, self).__init__()
self._app_id = None
self._app_name = None
self._product_id = None
self._name = None
self._device_type = None
self._protocol_type = None
self._data_format = None
self._manufacturer_name = None
self._industry = None
self._description = None
self._service_capabilities = None
self._create_time = None
self.discriminator = None
if app_id is not None:
self.app_id = app_id
if app_name is not None:
self.app_name = app_name
if product_id is not None:
self.product_id = product_id
if name is not None:
self.name = name
if device_type is not None:
self.device_type = device_type
if protocol_type is not None:
self.protocol_type = protocol_type
if data_format is not None:
self.data_format = data_format
if manufacturer_name is not None:
self.manufacturer_name = manufacturer_name
if industry is not None:
self.industry = industry
if description is not None:
self.description = description
if service_capabilities is not None:
self.service_capabilities = service_capabilities
if create_time is not None:
self.create_time = create_time
@property
def app_id(self):
"""Gets the app_id of this UpdateProductResponse.
        Resource space ID.
:return: The app_id of this UpdateProductResponse.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this UpdateProductResponse.
        Resource space ID.
:param app_id: The app_id of this UpdateProductResponse.
:type: str
"""
self._app_id = app_id
@property
def app_name(self):
"""Gets the app_name of this UpdateProductResponse.
        Resource space name.
:return: The app_name of this UpdateProductResponse.
:rtype: str
"""
return self._app_name
@app_name.setter
def app_name(self, app_name):
"""Sets the app_name of this UpdateProductResponse.
        Resource space name.
:param app_name: The app_name of this UpdateProductResponse.
:type: str
"""
self._app_name = app_name
@property
def product_id(self):
"""Gets the product_id of this UpdateProductResponse.
        Product ID, which uniquely identifies a product; it is assigned by the IoT platform after the product is created.
:return: The product_id of this UpdateProductResponse.
:rtype: str
"""
return self._product_id
@product_id.setter
def product_id(self, product_id):
"""Sets the product_id of this UpdateProductResponse.
        Product ID, which uniquely identifies a product; it is assigned by the IoT platform after the product is created.
:param product_id: The product_id of this UpdateProductResponse.
:type: str
"""
self._product_id = product_id
@property
def name(self):
"""Gets the name of this UpdateProductResponse.
        Product name.
:return: The name of this UpdateProductResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UpdateProductResponse.
        Product name.
:param name: The name of this UpdateProductResponse.
:type: str
"""
self._name = name
@property
def device_type(self):
"""Gets the device_type of this UpdateProductResponse.
        Device type.
:return: The device_type of this UpdateProductResponse.
:rtype: str
"""
return self._device_type
@device_type.setter
def device_type(self, device_type):
"""Sets the device_type of this UpdateProductResponse.
        Device type.
:param device_type: The device_type of this UpdateProductResponse.
:type: str
"""
self._device_type = device_type
@property
def protocol_type(self):
"""Gets the protocol_type of this UpdateProductResponse.
        Protocol type used by the device. Options: MQTT, CoAP, HTTP, HTTPS, Modbus, ONVIF, OPC-UA, OPC-DA.
:return: The protocol_type of this UpdateProductResponse.
:rtype: str
"""
return self._protocol_type
@protocol_type.setter
def protocol_type(self, protocol_type):
"""Sets the protocol_type of this UpdateProductResponse.
        Protocol type used by the device. Options: MQTT, CoAP, HTTP, HTTPS, Modbus, ONVIF, OPC-UA, OPC-DA.
:param protocol_type: The protocol_type of this UpdateProductResponse.
:type: str
"""
self._protocol_type = protocol_type
@property
def data_format(self):
"""Gets the data_format of this UpdateProductResponse.
        Format of the data reported by the device. Options: json, binary.
:return: The data_format of this UpdateProductResponse.
:rtype: str
"""
return self._data_format
@data_format.setter
def data_format(self, data_format):
"""Sets the data_format of this UpdateProductResponse.
        Format of the data reported by the device. Options: json, binary.
:param data_format: The data_format of this UpdateProductResponse.
:type: str
"""
self._data_format = data_format
@property
def manufacturer_name(self):
"""Gets the manufacturer_name of this UpdateProductResponse.
        Manufacturer name.
:return: The manufacturer_name of this UpdateProductResponse.
:rtype: str
"""
return self._manufacturer_name
@manufacturer_name.setter
def manufacturer_name(self, manufacturer_name):
"""Sets the manufacturer_name of this UpdateProductResponse.
        Manufacturer name.
:param manufacturer_name: The manufacturer_name of this UpdateProductResponse.
:type: str
"""
self._manufacturer_name = manufacturer_name
@property
def industry(self):
"""Gets the industry of this UpdateProductResponse.
        Industry to which the device belongs.
:return: The industry of this UpdateProductResponse.
:rtype: str
"""
return self._industry
@industry.setter
def industry(self, industry):
"""Sets the industry of this UpdateProductResponse.
        Industry to which the device belongs.
:param industry: The industry of this UpdateProductResponse.
:type: str
"""
self._industry = industry
@property
def description(self):
"""Gets the description of this UpdateProductResponse.
        Description of the product.
:return: The description of this UpdateProductResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this UpdateProductResponse.
        Description of the product.
:param description: The description of this UpdateProductResponse.
:type: str
"""
self._description = description
@property
def service_capabilities(self):
"""Gets the service_capabilities of this UpdateProductResponse.
        List of the device's service capabilities.
:return: The service_capabilities of this UpdateProductResponse.
:rtype: list[ServiceCapability]
"""
return self._service_capabilities
@service_capabilities.setter
def service_capabilities(self, service_capabilities):
"""Sets the service_capabilities of this UpdateProductResponse.
        List of the device's service capabilities.
:param service_capabilities: The service_capabilities of this UpdateProductResponse.
:type: list[ServiceCapability]
"""
self._service_capabilities = service_capabilities
@property
def create_time(self):
"""Gets the create_time of this UpdateProductResponse.
        Time when the product was created on the IoT platform, in the format yyyyMMdd'T'HHmmss'Z', e.g. 20151212T121212Z.
:return: The create_time of this UpdateProductResponse.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this UpdateProductResponse.
        Time when the product was created on the IoT platform, in the format yyyyMMdd'T'HHmmss'Z', e.g. 20151212T121212Z.
:param create_time: The create_time of this UpdateProductResponse.
:type: str
"""
self._create_time = create_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateProductResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
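# Usage sketch (illustrative values; not part of the generated SDK file):
#
#   resp = UpdateProductResponse(product_id='5ba2...', name='demo-product')
#   resp.to_dict()   # dict keyed by the attribute names in openapi_types
#   str(resp)        # JSON string via to_str()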
| 28.293689
| 237
| 0.612336
|
794a66200f5207f38eaaa8e4960d08021e44e172
| 1,245
|
py
|
Python
|
ivi/extra/__init__.py
|
sacherjj/python-ivi
|
6dd1ba93d65dc30a652a3a1b34c66921d94315e8
|
[
"MIT"
] | 161
|
2015-01-23T17:43:01.000Z
|
2022-03-29T14:42:42.000Z
|
ivi/extra/__init__.py
|
sacherjj/python-ivi
|
6dd1ba93d65dc30a652a3a1b34c66921d94315e8
|
[
"MIT"
] | 45
|
2015-01-15T13:35:04.000Z
|
2021-06-03T01:58:55.000Z
|
ivi/extra/__init__.py
|
sacherjj/python-ivi
|
6dd1ba93d65dc30a652a3a1b34c66921d94315e8
|
[
"MIT"
] | 87
|
2015-01-31T10:55:23.000Z
|
2022-03-17T08:18:47.000Z
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__all__ = [
# Common functions
"common",
# Extra base classes
"dcpwr"]
from . import *
| 35.571429
| 77
| 0.771888
|
794a676e5550b241f480f09a3cc9014b2df6eaa6
| 334
|
gyp
|
Python
|
binding.gyp
|
dickmao/tree-sitter-perl
|
5308d95b43160c896410c886706c9aaf4d17d81d
|
[
"MIT"
] | 16
|
2020-12-07T22:38:43.000Z
|
2022-01-26T09:11:53.000Z
|
binding.gyp
|
dickmao/tree-sitter-perl
|
5308d95b43160c896410c886706c9aaf4d17d81d
|
[
"MIT"
] | 18
|
2022-02-24T17:03:28.000Z
|
2022-03-16T23:12:49.000Z
|
binding.gyp
|
dickmao/tree-sitter-perl
|
5308d95b43160c896410c886706c9aaf4d17d81d
|
[
"MIT"
] | 4
|
2021-06-30T20:47:32.000Z
|
2022-02-15T16:26:18.000Z
|
{
"targets": [
{
"target_name": "tree_sitter_perl_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"src/parser.c",
"src/scanner.cc",
"bindings/node/binding.cc"
],
"cflags_c": [
"-std=c99",
]
}
]
}
| 16.7
| 48
| 0.407186
|
794a6ba78d1b002329a8228e26711dab7c679707
| 5,251
|
py
|
Python
|
paragraph_encoder/multi_corpus.py
|
rajarshd/Multi-Step-Reasoning
|
3218d626839f7217554f38d82e00e4f460b508e4
|
[
"Apache-2.0"
] | 122
|
2019-03-12T13:57:10.000Z
|
2022-03-25T08:19:56.000Z
|
paragraph_encoder/multi_corpus.py
|
rajarshd/Multi-Step-Reasoning
|
3218d626839f7217554f38d82e00e4f460b508e4
|
[
"Apache-2.0"
] | 5
|
2019-09-25T00:55:20.000Z
|
2021-06-15T09:43:58.000Z
|
paragraph_encoder/multi_corpus.py
|
rajarshd/Multi-Step-Reasoning
|
3218d626839f7217554f38d82e00e4f460b508e4
|
[
"Apache-2.0"
] | 12
|
2019-04-08T03:04:09.000Z
|
2020-08-17T14:49:35.000Z
|
import numpy as np
import os
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import pairwise_distances
from tqdm import tqdm
from pathlib import Path
import argparse
import time
class MultiCorpus:
class Paragraph:
def __init__(self, args, pid, text, answer_span, qid, tfidf):
"""
:param args:
:param pid:
:param text:
:param answer_span: numpy array of size num_occ X 2
:param qid:
:param tfidf:
"""
self.args = args
self.pid = pid
self.text = text
self.answer_span = answer_span
self.ans_occurance = answer_span.shape[0]
self.qid = qid
self.tfidf_score = tfidf
self.model_score = None
class Question:
def __init__(self, args, qid, text, pids):
self.args = args
self.qid = qid
self.text = text
self.pids = pids
def __init__(self, args):
self.args = args
self.tfidf = TfidfVectorizer(strip_accents="unicode", stop_words="english")
self.questions = {}
self.paragraphs = {}
def dists(self, question, paragraphs):
text = []
for para in paragraphs:
text.append(" ".join("".join(s) for s in para.text))
try:
para_features = self.tfidf.fit_transform(text)
q_features = self.tfidf.transform([" ".join(question)])
        except Exception:
            print("tfidf fit_transform threw an exception")
            return [(para, float('inf')) for para in paragraphs]
dists = pairwise_distances(q_features, para_features, "cosine").ravel()
sorted_ix = np.lexsort(([x.start for x in paragraphs], dists)) # in case of ties, use the earlier paragraph
return [(paragraphs[i], dists[i]) for i in sorted_ix]
def dists_text(self, question, paragraph_texts):
"""
modified dist which takes in only paragraph object
:param question:
:param paragraphs:
:return:
"""
text = []
for para in paragraph_texts:
text.append(" ".join(para))
try:
para_features = self.tfidf.fit_transform(text)
q_features = self.tfidf.transform([question])
        except Exception:
            print("tfidf fit_transform threw an exception")
            return [(para, float('inf')) for para in paragraph_texts]
dists = pairwise_distances(q_features, para_features, "cosine").ravel()
sorted_ix = np.argsort(dists)
return [(paragraph_texts[i], dists[i]) for i in sorted_ix]
def addQuestionParas(self, qid, qtext, paragraphs):
# for para in paragraphs:
# para.text = [w.encode("ascii", errors="ignore").decode() for w in para.text]
scores = None
if self.args.calculate_tfidf:
scores = self.dists(qtext, paragraphs)
para_ids = []
for p_counter, p in enumerate(paragraphs):
tfidf_score = float('inf')
if scores is not None:
_, tfidf_score = scores[p_counter]
pid = qid + "_para_" + str(p_counter)
para_ids.append(pid)
paragraph = self.Paragraph(self.args, pid, p.text, p.answer_spans, qid, tfidf_score)
self.paragraphs[pid] = paragraph
question = self.Question(self.args, qid, qtext, para_ids)
self.questions[qid] = question
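    # NOTE: the second addQuestionParas below shadows this definition at
    # class-creation time (Python keeps only the last method with a given
    # name), so only the paragraph_texts/answer_spans variant is callable.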
def addQuestionParas(self, qid, qtext, paragraph_texts, paragraph_answer_spans):
# for para in paragraphs:
# para.text = [w.encode("ascii", errors="ignore").decode() for w in para.text]
scores = None
if self.args.calculate_tfidf:
scores = self.dists_text(" ".join(qtext), paragraph_texts)
para_ids = []
for p_counter, p_text in enumerate(paragraph_texts):
tfidf_score = float('inf')
if scores is not None:
_, tfidf_score = scores[p_counter]
pid = qid + "_para_" + str(p_counter)
para_ids.append(pid)
paragraph = self.Paragraph(self.args, pid, p_text, paragraph_answer_spans[p_counter], qid, tfidf_score)
self.paragraphs[pid] = paragraph
question = self.Question(self.args, qid, qtext, para_ids)
self.questions[qid] = question
def get_topk_tfidf(corpus):
top1 = 0
top3 = 0
top5 = 0
for qid in corpus.questions:
para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in
corpus.questions[qid].pids]
sorted_para_scores = sorted(para_scores, key=lambda x: x[0])
# import pdb
# pdb.set_trace()
if sorted_para_scores[0][1] > 0:
top1 += 1
if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:
top3 += 1
if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:
top5 += 1
print(
'top1 = {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions),
top5 / len(corpus.questions)))
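# Usage sketch (a minimal assumed setup; `args` only needs the
# calculate_tfidf flag read above):
#
#   import argparse
#   args = argparse.Namespace(calculate_tfidf=True)
#   corpus = MultiCorpus(args)
#   corpus.addQuestionParas('q1', ['who', 'wrote', 'hamlet'],
#                           [['shakespeare', 'wrote', 'hamlet']],
#                           [np.ones((1, 2), dtype=int)])
#   get_topk_tfidf(corpus)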
| 34.774834
| 116
| 0.582746
|
794a6c48d0b369cc2c7998a2f0baf5d95804c3ff
| 9,598
|
py
|
Python
|
suap_ead/template_settings.py
|
suap-ead/lib_suap_ead
|
480027d2bd1682e6f707c0638155f53e19f0a225
|
[
"MIT"
] | null | null | null |
suap_ead/template_settings.py
|
suap-ead/lib_suap_ead
|
480027d2bd1682e6f707c0638155f53e19f0a225
|
[
"MIT"
] | 3
|
2020-10-02T16:47:06.000Z
|
2021-11-04T00:55:30.000Z
|
suap_ead/template_settings.py
|
suap-ead/suap_ead
|
480027d2bd1682e6f707c0638155f53e19f0a225
|
[
"MIT"
] | null | null | null |
from sc4py.env import env, env_as_int, env_as_bool, env_as_list, env_from_json
import sc4net
# Development
DEBUG = env_as_bool('DJANGO_DEBUG', True)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {'console': {'class': 'logging.StreamHandler'}, },
'loggers': {
'': {'handlers': ['console'], 'level': 'DEBUG'},
'parso': {'handlers': ['console'], 'level': 'INFO'},
'asyncio': {'level': 'WARNING'}
},
}
if env_as_bool('DJANGO_DEBUG_SQL', False):
LOGGING['loggers']['django.db.backends'] = {'level': 'DEBUG', 'handlers': ['console']}
if env_as_bool('DJANGO_DEBUG_LDAP', False):
LOGGING['loggers']['django_auth_ldap'] = {'level': 'DEBUG', 'handlers': ['console']}
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda request: request.get_host() in ['localhost', '127.0.0.1', 'sso'],
}
# Apps
MY_APPS = env_as_list('MY_APPS', '')
SUAP_EAD_LIBS = env_as_list('SUAP_EAD_LIBS', 'suap_ead')
DEV_APPS = env_as_list('DEV_APPS', 'debug_toolbar,django_extensions' if DEBUG else '')
THIRD_APPS = env_as_list('THIRD_APPS', 'rest_framework_swagger,'
'rest_framework')
DJANGO_APPS = env_as_list('DJANGO_APPS', 'django.contrib.admin,'
'django.contrib.auth,'
'django.contrib.contenttypes,'
'django.contrib.sessions,'
'django.contrib.messages,'
'django.contrib.staticfiles')
INSTALLED_APPS = MY_APPS + SUAP_EAD_LIBS + THIRD_APPS + DEV_APPS + DJANGO_APPS
# Middleware
MIDDLEWARE = env_as_list('MIDDLEWARE', 'django.middleware.security.SecurityMiddleware,'
'django.contrib.sessions.middleware.SessionMiddleware,'
'django.middleware.common.CommonMiddleware,'
'django.middleware.csrf.CsrfViewMiddleware,'
'django.contrib.auth.middleware.AuthenticationMiddleware,'
'django.contrib.messages.middleware.MessageMiddleware,'
'django.middleware.clickjacking.XFrameOptionsMiddleware')
if DEBUG:
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
# Template engine
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'suap_ead.context_processors.suap_ead',
'django.contrib.messages.context_processors.messages'
],
},
},
]
# Database
DATABASES = {
'default': {
'ENGINE': env('POSTGRES_ENGINE', 'django.db.backends.postgresql_psycopg2'),
'HOST': env('POSTGRES_HOST', 'db'),
'PORT': env('POSTGRES_PORT', '5432'),
'NAME': env('POSTGRES_DB', None),
'USER': env('POSTGRES_USER', 'postgres'),
'PASSWORD': env('POSTGRES_PASSWORD', 'postgres'),
}
}
# Routing
WSGI_APPLICATION = env('DJANGO_WSGI_APPLICATION', 'suap_ead.wsgi.application')
ALLOWED_HOSTS = env_as_list('DJANGO_ALLOWED_HOSTS', '*' if DEBUG else '')
USE_X_FORWARDED_HOST = True
ROOT_URLCONF = env('DJANGO_ROOT_URLCONF', 'urls')
URL_PATH_PREFIX = env('URL_PATH_PREFIX', 'sead/id/')
STATIC_URL = env('DJANGO_STATIC_URL', "/%s%s" % (URL_PATH_PREFIX, 'static/'))
STATIC_ROOT = env('DJANGO_STATIC_ROOT', "/static/" + URL_PATH_PREFIX)
MEDIA_URL = env('DJANGO_MEDIA_URL', "/%s%s" % (URL_PATH_PREFIX, 'media/'))
MEDIA_ROOT = env('DJANGO_MEDIA_ROOT', '/media/' + URL_PATH_PREFIX)
# Localization
LANGUAGE_CODE = env('DJANGO_LANGUAGE_CODE', 'pt-br')
TIME_ZONE = env('DJANGO_TIME_ZONE', 'America/Fortaleza')
USE_I18N = env_as_bool('DJANGO_USE_I18N', True)
USE_L10N = env_as_bool('DJANGO_USE_L10N', True)
USE_TZ = env_as_bool('DJANGO_USE_TZ', True)
# REST Framework
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.JSONRenderer',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': (
'suap_ead.auth.SecretDelegateAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
],
}
# Email
EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND", 'django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = env("DJANGO_EMAIL_HOST", 'localhost')
EMAIL_PORT = env_as_int("DJANGO_EMAIL_PORT", 25)
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", '')
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", '')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", '[SEAD] ')
EMAIL_USE_LOCALTIME = env_as_bool("DJANGO_EMAIL_USE_LOCALTIME", False)
EMAIL_USE_TLS = env_as_bool("DJANGO_EMAIL_USE_TLS", False)
EMAIL_USE_SSL = env_as_bool("DJANGO_EMAIL_USE_SSL", False)
EMAIL_SSL_CERTFILE = env("DJANGO_EMAIL_SSL_CERTFILE", None)
EMAIL_SSL_KEYFILE = env("DJANGO_EMAIL_SSL_KEYFILE", None)
EMAIL_TIMEOUT = env_as_int("DJANGO_EMAIL_TIMEOUT", None)
# Session
session_slug = URL_PATH_PREFIX.replace("/", "")
# SESSION_CACHE_ALIAS = env("DJANGO_SESSION_CACHE_ALIAS", 'default')
SESSION_COOKIE_AGE = env_as_int('DJANGO_SESSION_COOKIE_AGE', 1209600)
SESSION_COOKIE_DOMAIN = env('DJANGO_SESSION_COOKIE_DOMAIN', None)
SESSION_COOKIE_HTTPONLY = env_as_bool('DJANGO_SESSION_COOKIE_HTTPONLY', True)
SESSION_COOKIE_NAME = env("DJANGO_SESSION_COOKIE_NAME", '%s_sessionid' % session_slug)
SESSION_COOKIE_PATH = env("DJANGO_SESSION_COOKIE_PATH", '/')
SESSION_COOKIE_SAMESITE = env("DJANGO_SESSION_COOKIE_SAMESITE", 'Lax')
SESSION_COOKIE_SECURE = env_as_bool('DJANGO_SESSION_COOKIE_SECURE', False)
SESSION_ENGINE = env("DJANGO_SESSION_ENGINE", 'redis_sessions.session')
SESSION_EXPIRE_AT_BROWSER_CLOSE = env_as_bool('DJANGO_SESSION_EXPIRE_AT_BROWSER_CLOSE', False)
SESSION_FILE_PATH = env('DJANGO_SESSION_FILE_PATH', None)
SESSION_SAVE_EVERY_REQUEST = env_as_bool('DJANGO_SESSION_SAVE_EVERY_REQUEST', False)
SESSION_SERIALIZER = env("DJANGO_SESSION_SERIALIZER", 'django.contrib.sessions.serializers.JSONSerializer')
SESSION_REDIS = {
'host': env("DJANGO_SESSION_REDIS_HOST", 'redis'),
'port': env_as_int("DJANGO_SESSION_REDIS_PORT", 6379),
'db': env_as_int("DJANGO_SESSION_REDIS_DB", 0),
'password': env("DJANGO_SESSION_REDIS_PASSWORD", 'redis_password'),
'prefix': env("DJANGO_SESSION_REDIS_PREFIX", '%s_session' % session_slug),
'socket_timeout': env("DJANGO_SESSION_REDIS_SOCKET_TIMEOUT", 0.1),
'retry_on_timeout': env("DJANGO_SESSION_REDIS_RETRY_ON_TIMEOUT", False),
}
# Auth and Security... some another points impact on security, take care!
SUAP_EAD_ID_JWT_AUTHORIZE = env("SUAP_EAD_ID_JWT_AUTHORIZE", '/ead/id/jwt/authorize/')
SUAP_EAD_ID_JWT_VALIDATE = env("SUAP_EAD_ID_JWT_VALIDATE", 'http://id:8000/ead/id/jwt/validate/')
SUAP_EAD_ID_JWT_LOGOUT = env("SUAP_EAD_ID_JWT_LOGOUT", 'http://id:8000/ead/id/logout/')
SUAP_EAD_ID_JWT_CLIENT_ID = env("SUAP_EAD_ID_JWT_CLIENT_ID", '_SUAP_EAD_ID_JWT_CLIENT_ID_')
SUAP_EAD_ID_JWT_SECRET = env("SUAP_EAD_ID_JWT_SECRET", '_SUAP_EAD_ID_JWT_SECRET_')
SUAP_EAD_UTILS_AUTH_JWT_BACKEND = env("SUAP_EAD_UTILS_AUTH_JWT_BACKEND", 'suap_ead.backends.PreExistentUserJwtBackend')
SECRET_KEY = env('DJANGO_SECRET_KEY', 'changeme')
LOGIN_URL = env("DJANGO_LOGIN_URL", URL_PATH_PREFIX + 'jwt/login')
LOGOUT_URL = env("DJANGO_LOGOUT_URL", URL_PATH_PREFIX + 'logout/')
LOGIN_REDIRECT_URL = env("DJANGO_LOGIN_REDIRECT_URL", '/' + URL_PATH_PREFIX)
LOGOUT_REDIRECT_URL = env("DJANGO_LOGOUT_REDIRECT_URL", '/' + URL_PATH_PREFIX)
AUTH_USER_MODEL = env("DJANGO_AUTH_USER_MODEL", 'auth.User')
AUTHENTICATION_BACKENDS = env_as_list('DJANGO_AUTHENTICATION_BACKENDS', 'django.contrib.auth.backends.ModelBackend')
USE_LDAP = env('LDAP_AUTH_URL', None) is not None and env('LDAP_AUTH_URL', None) != 'ldap://0.0.0.0'
if USE_LDAP:
LDAP_AUTH_URL = env('LDAP_AUTH_URL', '')
LDAP_AUTH_USE_TLS = env_as_bool('LDAP_AUTH_USE_TLS')
LDAP_AUTH_SEARCH_BASE = env('LDAP_AUTH_SEARCH_BASE', None)
LDAP_AUTH_OBJECT_CLASS = env('LDAP_AUTH_OBJECT_CLASS', 'user')
LDAP_AUTH_USER_FIELDS = env_from_json('LDAP_AUTH_USER_FIELDS', None, True)
LDAP_AUTH_USER_LOOKUP_FIELDS = env_as_list('LDAP_AUTH_USER_LOOKUP_FIELDS', 'username')
LDAP_AUTH_CLEAN_USER_DATA = env('LDAP_AUTH_CLEAN_USER_DATA')
LDAP_AUTH_SYNC_USER_RELATIONS = env('LDAP_AUTH_SYNC_USER_RELATIONS')
LDAP_AUTH_FORMAT_SEARCH_FILTERS = env('LDAP_AUTH_FORMAT_SEARCH_FILTERS')
LDAP_AUTH_ACTIVE_DIRECTORY_DOMAIN = env('LDAP_AUTH_ACTIVE_DIRECTORY_DOMAIN')
LDAP_AUTH_CONNECT_TIMEOUT = env_as_int('LDAP_AUTH_CONNECT_TIMEOUT', 10)
LDAP_AUTH_RECEIVE_TIMEOUT = env_as_int('LDAP_AUTH_RECEIVE_TIMEOUT', 10)
LDAP_AUTH_FORMAT_USERNAME = env('LDAP_AUTH_FORMAT_USERNAME', 'django_python3_ldap.format_username_active_directory')
LDAP_ACTIVE_VALUE = env('LDAP_ACTIVE_VALUE', '512')
AUTHENTICATION_BACKENDS = env_as_list('DJANGO_AUTHENTICATION_BACKENDS', 'django_python3_ldap.auth.LDAPBackend')
sc4net.default_headers = {"Authorization": "Secret %s" % SUAP_EAD_ID_JWT_SECRET}
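# Usage sketch: every setting above is driven by environment variables, so a
# deployment only needs to export overrides (values below are illustrative):
#
#   DJANGO_DEBUG=False
#   POSTGRES_HOST=db.example.internal
#   DJANGO_ALLOWED_HOSTS=suap.example.org
#   SUAP_EAD_ID_JWT_SECRET=change-this-secret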
| 48.231156
| 120
| 0.715982
|
794a6c87fe3e370c7407a46b548b1154bee13b61
| 8,469
|
py
|
Python
|
src/pretalx/mail/context.py
|
lili668668/pretalx
|
5ba2185ffd7c5f95254aafe25ad3de340a86eadb
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/mail/context.py
|
lili668668/pretalx
|
5ba2185ffd7c5f95254aafe25ad3de340a86eadb
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/mail/context.py
|
lili668668/pretalx
|
5ba2185ffd7c5f95254aafe25ad3de340a86eadb
|
[
"Apache-2.0"
] | null | null | null |
from django.dispatch import receiver
from django.template.defaultfilters import date as _date
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from pretalx.mail.placeholders import SimpleFunctionalMailTextPlaceholder
from pretalx.mail.signals import register_mail_placeholders
def get_mail_context(**kwargs):
event = kwargs["event"]
if "submission" in kwargs and "slot" not in kwargs:
slot = kwargs["submission"].slot
if slot and slot.start and slot.room:
kwargs["slot"] = kwargs["submission"].slot
context = {}
for recv, placeholders in register_mail_placeholders.send(sender=event):
if not isinstance(placeholders, (list, tuple)):
placeholders = [placeholders]
for placeholder in placeholders:
if all(required in kwargs for required in placeholder.required_context):
context[placeholder.identifier] = placeholder.render(kwargs)
return context
def get_available_placeholders(event, kwargs):
params = {}
for recv, placeholders in register_mail_placeholders.send(sender=event):
if not isinstance(placeholders, (list, tuple)):
placeholders = [placeholders]
for placeholder in placeholders:
if all(required in kwargs for required in placeholder.required_context):
params[placeholder.identifier] = placeholder
return params
@receiver(register_mail_placeholders, dispatch_uid="pretalx_register_base_placeholders")
def base_placeholders(sender, **kwargs):
placeholders = [
SimpleFunctionalMailTextPlaceholder(
"event",
["event"],
lambda event: event.name,
lambda event: event.name,
_("The event's full name"),
),
SimpleFunctionalMailTextPlaceholder(
"event_name",
["event"],
lambda event: event.name,
lambda event: event.name,
_("The event's full name"),
),
SimpleFunctionalMailTextPlaceholder(
"event_slug",
["event"],
lambda event: event.slug,
lambda event: event.slug,
_("The event's short form, used in URLs"),
),
SimpleFunctionalMailTextPlaceholder(
"event_url",
["event"],
lambda event: event.urls.base.full(),
lambda event: f"https://pretalx.com/{event.slug}/",
_("The event's public base URL"),
),
SimpleFunctionalMailTextPlaceholder(
"event_schedule_url",
["event"],
lambda event: event.urls.schedule.full(),
lambda event: f"https://pretalx.com/{event.slug}/schedule/",
_("The event's public schedule URL"),
),
SimpleFunctionalMailTextPlaceholder(
"event_cfp_url",
["event"],
lambda event: event.cfp.urls.base.full(),
lambda event: f"https://pretalx.com/{event.slug}/cfp",
_("The event's public CfP URL"),
),
SimpleFunctionalMailTextPlaceholder(
"all_submissions_url",
["event", "user"],
lambda event, user: event.urls.user_submissions.full(),
"https://pretalx.example.com/democon/me/submissions/",
_("URL to a user's list of proposals"),
),
SimpleFunctionalMailTextPlaceholder(
"deadline",
["event"],
lambda event: _date(
event.cfp.deadline.astimezone(event.tz), "SHORT_DATETIME_FORMAT"
)
if event.cfp.deadline
else "",
lambda event: _date(
event.cfp.deadline.astimezone(event.tz), "SHORT_DATETIME_FORMAT"
)
if event.cfp.deadline
else "",
_("The general CfP deadline"),
),
SimpleFunctionalMailTextPlaceholder(
"code",
["submission"],
lambda submission: submission.code,
"F8VVL",
_("The proposal's unique ID"),
),
SimpleFunctionalMailTextPlaceholder(
"talk_url",
["slot"],
lambda slot: slot.submission.urls.public.full(),
"https://pretalx.example.com/democon/schedule/F8VVL/",
_("The proposal's public URL"),
),
SimpleFunctionalMailTextPlaceholder(
"edit_url",
["submission"],
lambda submission: submission.urls.user_base.full(),
"https://pretalx.example.com/democon/me/submissions/F8VVL/",
_("The speaker's edit page for the proposal"),
),
SimpleFunctionalMailTextPlaceholder(
"submission_url",
["submission"],
lambda submission: submission.urls.user_base.full(),
"https://pretalx.example.com/democon/me/submissions/F8VVL/",
_("The speaker's edit page for the proposal"),
),
SimpleFunctionalMailTextPlaceholder(
"confirmation_link",
["submission"],
lambda submission: submission.urls.confirm.full(),
"https://pretalx.example.com/democon/me/submissions/F8VVL/confirm",
_("Link to confirm a proposal after it has been accepted."),
),
SimpleFunctionalMailTextPlaceholder(
"withdraw_link",
["submission"],
lambda submission: submission.urls.withdraw.full(),
"https://pretalx.example.com/democon/me/submissions/F8VVL/withdraw",
_("Link to withdraw the proposal"),
),
SimpleFunctionalMailTextPlaceholder(
"proposal_title",
["submission"],
lambda submission: submission.title,
"Open-architected uniform middleware",
_("The proposal's title"),
),
SimpleFunctionalMailTextPlaceholder(
"submission_title",
["submission"],
lambda submission: submission.title,
"Open-architected uniform middleware",
_("The proposal's title"),
),
SimpleFunctionalMailTextPlaceholder(
"speakers",
["submission"],
            lambda submission: submission.display_speaker_names,
            "Jane Doe, John Doe",
            _("The name(s) of all speakers in this proposal."),
),
SimpleFunctionalMailTextPlaceholder(
"track_name",
["submission"],
lambda submission: str(submission.track.name) if submission.track else "",
"Science",
_("The track the proposal belongs to"),
),
SimpleFunctionalMailTextPlaceholder(
"session_start_date",
["slot"],
lambda slot: _date(slot.start, "SHORT_DATE_FORMAT"),
_date(now(), "SHORT_DATE_FORMAT"),
_("The session's start date"),
),
SimpleFunctionalMailTextPlaceholder(
"session_start_time",
["slot"],
lambda slot: _date(slot.start, "SHORT_TIME_FORMAT"),
_date(now(), "SHORT_TIME_FORMAT"),
_("The session's start time"),
),
SimpleFunctionalMailTextPlaceholder(
"session_end_date",
["slot"],
lambda slot: _date(slot.real_end, "SHORT_DATE_FORMAT"),
_date(now(), "SHORT_DATE_FORMAT"),
_("The session's end date"),
),
SimpleFunctionalMailTextPlaceholder(
"session_end_time",
["slot"],
            lambda slot: _date(slot.real_end, "SHORT_TIME_FORMAT"),
_date(now(), "SHORT_TIME_FORMAT"),
_("The session's end time"),
),
SimpleFunctionalMailTextPlaceholder(
"session_room",
["slot"],
lambda slot: str(slot.room),
_("Room 101"),
_("The session's room"),
),
SimpleFunctionalMailTextPlaceholder(
"name",
["user"],
lambda user: user.name,
_("Jane Doe"),
_("The addressed user's full name"),
),
SimpleFunctionalMailTextPlaceholder(
"email",
["user"],
lambda user: user.email,
"jane@example.org",
_("The addressed user's email address"),
),
]
return placeholders
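# Usage sketch (event/submission/user are assumed to be pretalx model
# instances; the context keys are the placeholder identifiers above):
#
#   context = get_mail_context(event=event, submission=submission, user=user)
#   body = "Hi {name}, we received {proposal_title}.".format(**context)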
| 37.30837
| 88
| 0.572205
|
794a6d8d058179be1920c957cdaf70dff8908c0c
| 5,182
|
py
|
Python
|
training.py
|
alifkurniawan/tesis
|
6330dba32f5dc12785e956875c94d83344d788a8
|
[
"MIT"
] | null | null | null |
training.py
|
alifkurniawan/tesis
|
6330dba32f5dc12785e956875c94d83344d788a8
|
[
"MIT"
] | 3
|
2022-01-13T03:13:37.000Z
|
2022-03-12T00:48:18.000Z
|
training.py
|
alifkurniawan/tesis
|
6330dba32f5dc12785e956875c94d83344d788a8
|
[
"MIT"
] | null | null | null |
"""
This file is part of the OpenProtein project.
For license information, please see the LICENSE file in the root directory.
"""
import json
import time
import numpy as np
import requests
import torch.optim as optim
from util import set_experiment_id, write_out, write_model_to_disk, write_result_summary
def train_model(data_set_identifier, model, train_loader, validation_loader,
learning_rate, minibatch_size=64, eval_interval=50, hide_ui=False,
use_gpu=False, minimum_updates=1000,
optimizer_type='adam', restart=False):
set_experiment_id(data_set_identifier, learning_rate, minibatch_size)
validation_dataset_size = validation_loader.dataset.__len__()
if use_gpu:
model = model.cuda()
if optimizer_type == 'adam':
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
elif optimizer_type == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
elif optimizer_type == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(), lr=learning_rate)
else:
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
if restart:
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=32)
sample_num = list()
train_loss_values = list()
train_drmsd_values = list()
validation_loss_values = list()
validation_angles_loss_values = list()
best_model_loss = 1e20
best_model_minibatch_time = None
best_model_path = None
best_json_data = None
stopping_condition_met = False
minibatches_proccesed = 0
while not stopping_condition_met:
# for i in range(2):
optimizer.zero_grad()
model.zero_grad()
loss_tracker = np.zeros(0)
drmsd_tracker = np.zeros(0)
for _minibatch_id, training_minibatch in enumerate(train_loader, 0):
minibatches_proccesed += 1
start_compute_loss = time.time()
loss, drmsd_avg = model.compute_loss(training_minibatch)
write_out("Train loss:", float(loss))
start_compute_grad = time.time()
loss.backward()
loss_tracker = np.append(loss_tracker, float(loss))
drmsd_tracker = np.append(drmsd_tracker, float(drmsd_avg))
end = time.time()
write_out("Loss time:", start_compute_grad - start_compute_loss, "Grad time:",
end - start_compute_grad)
optimizer.step()
if restart:
scheduler.step()
optimizer.zero_grad()
model.zero_grad()
# for every eval_interval samples, plot performance on the validation set
if minibatches_proccesed % eval_interval == 0:
write_out("Testing model on validation set...")
train_loss = float(loss_tracker.mean())
train_drmsd = float(drmsd_tracker.mean())
loss_tracker = np.zeros(0)
drmsd_tracker = np.zeros(0)
validation_loss, json_data, _, validation_angles_loss = model.evaluate_model(validation_loader)
if validation_loss < best_model_loss:
best_model_loss = validation_loss
best_model_minibatch_time = minibatches_proccesed
best_model_path = write_model_to_disk(model)
best_json_data = json_data
write_out("Validation loss:", validation_loss, "Train loss:", train_loss, "Train drmsd:", train_drmsd)
write_out("Best model so far (validation loss): ", best_model_loss, "at time",
best_model_minibatch_time)
write_out("Best model stored at " + best_model_path)
write_out("Minibatches processed:", minibatches_proccesed)
sample_num.append(minibatches_proccesed)
train_loss_values.append(train_loss)
train_drmsd_values.append(train_drmsd)
validation_loss_values.append(validation_loss)
validation_angles_loss_values.append(validation_angles_loss)
json_data["validation_dataset_size"] = validation_dataset_size
json_data["sample_num"] = sample_num
json_data["train_loss_values"] = train_loss_values
json_data["train_drmsd_values"] = train_drmsd_values
json_data["validation_loss_values"] = validation_loss_values
json_data['validation_angles_loss_values'] = validation_angles_loss_values
write_out(json_data)
if not hide_ui:
res = requests.post('http://localhost:5000/graph', json=json_data)
if res.ok:
print(res.json())
if minibatches_proccesed > minimum_updates and minibatches_proccesed \
>= best_model_minibatch_time + minimum_updates:
stopping_condition_met = True
break
write_result_summary(best_model_loss)
write_result_summary(json.dumps(best_json_data))
return best_model_path
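# Usage sketch (the model must expose compute_loss() and evaluate_model() as
# used above; the loaders are ordinary torch DataLoaders):
#
#   best_path = train_model("protein_net", model, train_loader, val_loader,
#                           learning_rate=1e-3, minibatch_size=32,
#                           hide_ui=True, minimum_updates=2000)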
| 42.47541
| 118
| 0.642609
|
794a6daa4fa6bedfe3855453fb2f7ae6a3432899
| 61
|
py
|
Python
|
tests/__init__.py
|
Pandelytics/pandelytics
|
5950d4a95595dadf076ac9270be0dbcdcfa59a1a
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Pandelytics/pandelytics
|
5950d4a95595dadf076ac9270be0dbcdcfa59a1a
|
[
"Apache-2.0"
] | 11
|
2020-04-02T22:36:36.000Z
|
2020-09-27T11:19:23.000Z
|
tests/__init__.py
|
Pandelytics/pandelytics
|
5950d4a95595dadf076ac9270be0dbcdcfa59a1a
|
[
"Apache-2.0"
] | 1
|
2020-10-07T15:48:06.000Z
|
2020-10-07T15:48:06.000Z
|
from pandelytics import __version__
import pandelytics.search
| 30.5
| 35
| 0.901639
|
794a6e0ccb640a9f5d410a879ab1a129f2099d33
| 459
|
py
|
Python
|
store_backend/plugins/migrations/0003_plugin_icon.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-23T19:27:10.000Z
|
2021-04-30T16:40:04.000Z
|
store_backend/plugins/migrations/0003_plugin_icon.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 46
|
2018-05-21T14:54:43.000Z
|
2022-01-28T01:37:57.000Z
|
store_backend/plugins/migrations/0003_plugin_icon.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-28T04:37:25.000Z
|
2021-05-28T06:40:30.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-07-20 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plugins', '0002_auto_20180503_1606'),
]
operations = [
migrations.AddField(
model_name='plugin',
name='icon',
field=models.URLField(blank=True, max_length=300),
),
]
| 21.857143
| 62
| 0.616558
|
794a6ed6586e8b15df7b2d5199b0a4f374b7028b
| 196
|
py
|
Python
|
Django_Project/django/Scripts/django-admin.py
|
mitchrule/Miscellaneous
|
57f7453e0f97b6fe8f186620ebe94f0ca736cc1f
|
[
"MIT"
] | null | null | null |
Django_Project/django/Scripts/django-admin.py
|
mitchrule/Miscellaneous
|
57f7453e0f97b6fe8f186620ebe94f0ca736cc1f
|
[
"MIT"
] | null | null | null |
Django_Project/django/Scripts/django-admin.py
|
mitchrule/Miscellaneous
|
57f7453e0f97b6fe8f186620ebe94f0ca736cc1f
|
[
"MIT"
] | null | null | null |
#!C:\Users\Mitch\Google Drive\Programming\Python\Django_Project\django\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 32.666667
| 89
| 0.806122
|
794a6f025f2d9e02b7c4a0b62cfb0469302e464b
| 813
|
py
|
Python
|
events/CreationSex.py
|
crexodon/rating.chat
|
d3f2b2cea6761c51041d0a96856cc1e4f8eb138f
|
[
"MIT"
] | null | null | null |
events/CreationSex.py
|
crexodon/rating.chat
|
d3f2b2cea6761c51041d0a96856cc1e4f8eb138f
|
[
"MIT"
] | null | null | null |
events/CreationSex.py
|
crexodon/rating.chat
|
d3f2b2cea6761c51041d0a96856cc1e4f8eb138f
|
[
"MIT"
] | 1
|
2018-12-02T09:43:55.000Z
|
2018-12-02T09:43:55.000Z
|
from event_base.event import EventBase
class CreationSex(EventBase):
def __init__(self, chat_id: int):
        super().__init__(chat_id=chat_id, prev_event_ids=['creation_age'], event_id="creation_sex",
                         message_text="Wähle dein Geschlecht",  # German: "Choose your gender"
                         buttons=[{'text': 'Männlich', 'next_event_id': 'media_start_media', 'decision_id': 1},
                                  {'text': 'Weiblich', 'next_event_id': 'media_start_media', 'decision_id': 2},
                                  {'text': 'divers', 'next_event_id': 'media_start_media', 'decision_id': 3}])
@staticmethod
def is_available(profile):
return True
@staticmethod
def react(profile, decision_id):
profile['basic']['sex'] = int(decision_id)
return profile
| 35.347826
| 113
| 0.587946
|
794a6f414d20e5b4a1e0bd226ca6f154a5c7b328
| 3,581
|
py
|
Python
|
noise.py
|
dave-leblanc/pytile
|
0584a10a2895245dd4515e00e153673bd40cbdbc
|
[
"BSD-3-Clause"
] | 8
|
2018-04-01T17:40:57.000Z
|
2021-08-13T07:01:17.000Z
|
noise.py
|
dave-leblanc/pytile
|
0584a10a2895245dd4515e00e153673bd40cbdbc
|
[
"BSD-3-Clause"
] | null | null | null |
noise.py
|
dave-leblanc/pytile
|
0584a10a2895245dd4515e00e153673bd40cbdbc
|
[
"BSD-3-Clause"
] | 5
|
2018-01-27T01:57:35.000Z
|
2021-03-07T08:20:51.000Z
|
#!/usr/bin/python
import sys, os, random, math
from functools import reduce  # built in on Python 2; required import on Python 3
from numpy import *
# Perlin noise object, allows for generation of either an arbitrary amount of
# non-repetitive noise or for the generation of tileable textures
class Perlin2D(object):
"""Extensible Perlin noise, non-repeating"""
def __init__(self, xdims, ydims, seed, inter, ppp, persistence, octaves):
"""Initialise the noise generator"""
self.randoms = self.regen_seeds(seed, octaves)
self.xdims = xdims
self.ydims = ydims
self.inter = inter
self.ppp = ppp
self.persistence = persistence
self.octaves = octaves
if inter == "linear":
self.inter = self.linear_interpolate_2D
elif inter == "cosine":
self.inter = self.cosine_interpolate_2D
self.gen_2D_noise()
def gen_2D_noise(self):
"""Return a set of arrays representing each octave of noise"""
self.octsets = []
for o in range(self.octaves):
# Generate set of X values for generating the set of y values
xrandoms = self.regen_seeds(self.randoms[o], self.xdims + 1)
a = []
for x in xrandoms:
random.seed(x)
b = []
for y in range(self.ydims + 1):
b.append(self.get_random())
a.append(b)
a = array(a)
self.octsets.append(a)
return True
def get_at_point_2D(self, x, y):
"""Return some arbitrary point on the noise plane"""
amps = []
zvals = []
# Find nearest points in x and y
for o, octset in enumerate(self.octsets):
# Doing this every time probably fine, 2^x is a quick operation
pow2o = pow(2,o)
            positionX, remainderX = divmod(x, self.ppp // pow2o)  # integer division keeps indices ints on Python 3
            positionY, remainderY = divmod(y, self.ppp // pow2o)
if remainderX != 0:
percentalongX = float(remainderX) / self.ppp * pow2o
else:
percentalongX = 0
if remainderY != 0:
percentalongY = float(remainderY) / self.ppp * pow2o
else:
percentalongY = 0
zval = self.inter(octset[positionX][positionY],
octset[positionX+1][positionY],
octset[positionX][positionY+1],
octset[positionX+1][positionY+1],
percentalongX, percentalongY)
zvals.append(zval)
amps.append(pow(self.persistence, o))
return reduce(lambda x, y: x+(y[0]*y[1]), zip(zvals, amps), 0) / sum(amps)
def regen_seeds(self, random_seed, values):
random.seed(random_seed)
randoms = []
for o in range(values):
randoms.append(random.randint(0,100))
return randoms
def get_random(self):
return random.uniform(-1,1)
def linear_interpolate(self, a, b, x):
return a*(1-x) + b*x
def cosine_interpolate(self, a, b, x):
ft = x * math.pi
f = (1 - math.cos(ft)) * 0.5
return a*(1-f) + b*f
def cosine_interpolate_2D(self, v1, v2, v3, v4, x, y):
A = self.cosine_interpolate(v1, v2, x)
B = self.cosine_interpolate(v3, v4, x)
return self.cosine_interpolate(A, B, y)
def linear_interpolate_2D(self, v1, v2, v3, v4, x, y):
A = self.linear_interpolate(v1, v2, x)
B = self.linear_interpolate(v3, v4, x)
return self.linear_interpolate(A, B, y)
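# Usage sketch (parameter values are illustrative; note this is Python 2-era
# code): sample a point from a 4x4 gradient grid with 32 pixels per point.
#
#   noise = Perlin2D(xdims=4, ydims=4, seed=42, inter="cosine",
#                    ppp=32, persistence=0.5, octaves=4)
#   z = noise.get_at_point_2D(10, 10)   # roughly in [-1, 1]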
| 34.76699
| 83
| 0.556269
|
794a6fb8af80af0ccbd98d6f42a33590f5c9a58e
| 1,756
|
py
|
Python
|
spirit/utils/forms.py
|
amitra/BikeMaps
|
eb80eed2e3159ad9c4e46427a9f488e1221794fa
|
[
"MIT"
] | 3
|
2017-12-01T08:17:38.000Z
|
2021-01-29T15:40:06.000Z
|
spirit/utils/forms.py
|
amitra/BikeMaps
|
eb80eed2e3159ad9c4e46427a9f488e1221794fa
|
[
"MIT"
] | 9
|
2020-06-05T17:44:02.000Z
|
2022-01-13T00:42:34.000Z
|
spirit/utils/forms.py
|
amitra/BikeMaps
|
eb80eed2e3159ad9c4e46427a9f488e1221794fa
|
[
"MIT"
] | 1
|
2020-11-08T21:47:32.000Z
|
2020-11-08T21:47:32.000Z
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.html import conditional_escape, mark_safe
from django.utils.encoding import smart_text
class NestedModelChoiceField(forms.ModelChoiceField):
"""A ModelChoiceField that groups parents and childrens"""
# TODO: subclass ModelChoiceIterator, remove _populate_choices()
def __init__(self, related_name, parent_field, label_field, *args, **kwargs):
"""
@related_name: related_name or "FOO_set"
@parent_field: ForeignKey('self') field, use 'name_id' to save some queries
@label_field: field for obj representation
"""
super(NestedModelChoiceField, self).__init__(*args, **kwargs)
self.related_name = related_name
self.parent_field = parent_field
self.label_field = label_field
self._populate_choices()
def _populate_choices(self):
# This is *hackish* but simpler than subclassing ModelChoiceIterator
choices = [("", self.empty_label), ]
kwargs = {self.parent_field: None, }
queryset = self.queryset.filter(**kwargs)\
.prefetch_related(self.related_name)
for parent in queryset:
choices.append((self.prepare_value(parent), self.label_from_instance(parent)))
choices.extend([(self.prepare_value(children), self.label_from_instance(children))
for children in getattr(parent, self.related_name).all()])
self.choices = choices
def label_from_instance(self, obj):
level_indicator = u""
if getattr(obj, self.parent_field):
level_indicator = u"--- "
return mark_safe(level_indicator + conditional_escape(smart_text(getattr(obj, self.label_field))))
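# Usage sketch (Category is a hypothetical model with a self-FK `parent`
# whose reverse accessor is `children`, plus a `title` field):
#
#   class TopicForm(forms.Form):
#       category = NestedModelChoiceField(
#           queryset=Category.objects.all(),
#           related_name='children',
#           parent_field='parent_id',
#           label_field='title')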
| 40.837209
| 106
| 0.677677
|
794a705ef9e0fd05d8bf7d1590461bb5ba81d849
| 746
|
py
|
Python
|
runs/snort/10KB/src8-tgt1/ssl-par-ssl-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
runs/snort/10KB/src8-tgt1/ssl-par-ssl-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
runs/snort/10KB/src8-tgt1/ssl-par-ssl-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
# Write results to this file
OUTFILE = 'runs/snort/10KB/src8-tgt1/ssl-par-ssl-iter00200.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.11', '10.0.0.12', '10.0.0.13', '10.0.0.14', '10.0.0.31', '10.0.0.32', '10.0.0.33', '10.0.0.34']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'ssl'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 200
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl'
| 27.62963
| 113
| 0.672922
|
794a7075ba4728502c15384bc56216871f997972
| 4,377
|
py
|
Python
|
tests/functional/conftest.py
|
william-richard/chili-pepper
|
812081fafe443f0a16ac017e1386e4725be9f576
|
[
"Apache-2.0"
] | 2
|
2020-06-22T15:47:18.000Z
|
2021-06-30T12:24:07.000Z
|
tests/functional/conftest.py
|
william-richard/chili-pepper
|
812081fafe443f0a16ac017e1386e4725be9f576
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/conftest.py
|
william-richard/chili-pepper
|
812081fafe443f0a16ac017e1386e4725be9f576
|
[
"Apache-2.0"
] | null | null | null |
import importlib
import os
import sys
import boto3
import pytest
from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3, mock_kms
@pytest.fixture(autouse=True)
def apply_moto_mocks():
with mock_cloudformation(), mock_iam(), mock_s3(), mock_lambda(), mock_kms():
boto3.setup_default_session()
yield None
@pytest.fixture(autouse=True)
def reset_sys_path():
# main.CLI.deploy can add stuff to sys.path, and load up modules that we'll want to re-import differently in subsequent tests
# like the files created in create_app_structure
# Resetting sys.path and the imported modules before each test will make things more reliable
#
# resetting sys.path probably has no effect - main.CLI.deploy should be putting new paths at the start of sys.path
# and so the new paths will be checked first, but it doesn't hurt
#
# resetting sys.modules absolutely has an effect, because without this, the first instance of a module (like the test app.tasks module)
# that was imported will be used for all the other tests.
original_sys_path = list(sys.path)
original_sys_modules_keys = list(sys.modules.keys())
try:
importlib.invalidate_caches()
except AttributeError:
# python2.7 does not have invalidate_caches
pass
yield
# https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s02.html
for m in list(sys.modules.keys()):
if m not in original_sys_modules_keys:
del sys.modules[m]
sys.path = original_sys_path
def create_chili_pepper_s3_bucket():
# type: () -> str
s3_client = boto3.client("s3")
bucket_name = "chili_pepper_test_bucket"
s3_client.create_bucket(Bucket=bucket_name)
# TODO make this optional, so we can test that our code can gracefully handle when bucket versioning is not enabled
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"})
return bucket_name
def create_app_structure(
tmp_path,
pytest_request_fixture,
bucket_name="you_forgot_to_call_conftest.create_chili_pepper_s3_bucket",
runtime="python3.7",
include_requirements=False,
environment_variables=None,
kms_key_arn=None,
):
if environment_variables is None:
environment_variables = dict()
# pytest_request should be the pytest request fixture https://docs.pytest.org/en/latest/reference.html#request
app_dir = tmp_path / "app"
app_dir.mkdir()
tasks_py = app_dir / "tasks.py"
tasks_py_body = """
from chili_pepper.app import ChiliPepper
app = ChiliPepper().create_app(app_name="demo")
app.conf['aws']['bucket_name'] = "{bucket_name}"
app.conf['aws']['runtime'] = "{runtime}"
{kms_key_arn_line}
@app.task(environment_variables={environment_variables})
def say_hello(event, context):
return_value = dict()
return_value["Hello"] = "World!"
print(return_value) # moto doesn't handle returns from lambda functions :(
return return_value
""".format(
bucket_name=bucket_name,
runtime=runtime,
environment_variables=environment_variables,
kms_key_arn_line="app.conf['aws']['kms_key'] = '{kms_key_arn}'".format(kms_key_arn=kms_key_arn) if kms_key_arn else "",
)
# python 2.7 compatibility
# https://stackoverflow.com/a/50139419
if hasattr(tasks_py_body, "decode"):
tasks_py_body = tasks_py_body.decode("utf8")
tasks_py.write_text(tasks_py_body, encoding="utf8")
if include_requirements:
# need to find the code directory, so we can tell the lambda container to install chili-pepper
test_file_path_str = str(pytest_request_fixture.fspath)
path_head_str, path_tail_str = os.path.split(test_file_path_str)
while path_tail_str != "tests":
path_head_str, path_tail_str = os.path.split(path_head_str)
code_root_dir = path_head_str
requirements_txt = app_dir / "requirements.txt"
requirements_txt_body = """
{code_root_dir}
""".format(
code_root_dir=code_root_dir
)
if hasattr(requirements_txt_body, "decode"):
requirements_txt_body = requirements_txt_body.decode("utf8")
requirements_txt.write_text(requirements_txt_body, encoding="utf8")
init_py = app_dir / "__init__.py"
init_py.touch()
return app_dir
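# Usage sketch inside a functional test (tmp_path and request are the
# standard pytest fixtures):
#
#   def test_deploy(tmp_path, request):
#       bucket = create_chili_pepper_s3_bucket()
#       app_dir = create_app_structure(tmp_path, request, bucket_name=bucket)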
| 37.09322
| 139
| 0.71533
|
794a718fd2137c35d1d51ee018de9d611cc5a6c0
| 5,595
|
py
|
Python
|
metar.py
|
danielwise904/METARMap
|
7e61e3a5d16adc6413c4f71e494033b4c64edbc4
|
[
"MIT"
] | null | null | null |
metar.py
|
danielwise904/METARMap
|
7e61e3a5d16adc6413c4f71e494033b4c64edbc4
|
[
"MIT"
] | null | null | null |
metar.py
|
danielwise904/METARMap
|
7e61e3a5d16adc6413c4f71e494033b4c64edbc4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import urllib.request
import xml.etree.ElementTree as ET
import board
import neopixel
import time
# NeoPixel LED Configuration
LED_COUNT = 150 # Number of LED pixels.
LED_PIN = board.D18 # GPIO pin connected to the pixels (18 is PCM).
LED_BRIGHTNESS = 0.1 # Float from 0.0 (min) to 1.0 (max)
LED_ORDER = neopixel.GRB # Strip type and colour ordering
COLOR_VFR = (255,0,0) # Green
COLOR_VFR_FADE = (125,0,0) # Green Fade for wind
COLOR_MVFR = (0,0,255) # Blue
COLOR_MVFR_FADE = (0,0,125) # Blue Fade for wind
COLOR_IFR = (0,255,0) # Red
COLOR_IFR_FADE = (0,125,0) # Red Fade for wind
COLOR_LIFR = (0,125,125) # Magenta
COLOR_LIFR_FADE = (0,75,75) # Magenta Fade for wind
COLOR_CLEAR = (0,0,0) # Clear
COLOR_LIGHTNING = (255,255,255) # White
# Do you want the METARMap to be static, just showing flight conditions, or do you also want blinking/fading based on current wind conditions?
ACTIVATE_WINDCONDITION_ANIMATION = True # Set this to False for Static or True for animated wind conditions
# Do you want the map to flash white for lightning in the area?
ACTIVATE_LIGHTNING_ANIMATION = True # Set this to False for Static or True for animated Lightning
# Fade instead of blink
FADE_INSTEAD_OF_BLINK = True # Set to False if you want blinking
# Blinking Windspeed Threshold
WIND_BLINK_THRESHOLD = 30 # Knots of windspeed
ALWAYS_BLINK_FOR_GUSTS = False # Always animate for Gusts (regardless of speeds)
# Blinking Speed in seconds
BLINK_SPEED = 1.0 # Float in seconds, e.g. 0.5 for half a second
# Initialize the LED strip
pixels = neopixel.NeoPixel(LED_PIN, LED_COUNT, brightness = LED_BRIGHTNESS, pixel_order = LED_ORDER, auto_write = False)
# Read the airports file to retrieve list of airports and use as order for LEDs
with open("/home/pi/METARMap/airports") as f:
airports = f.readlines()
airports = [x.strip() for x in airports]
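# The airports file holds one station identifier per line; the literal string
# "NULL" reserves an LED position that should stay dark, e.g.:
#   KJFK
#   NULL
#   KBOS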
# Retrieve METAR from aviationweather.gov data server
# Details about parameters can be found here: https://www.aviationweather.gov/dataserver/example?datatype=metar
url = "https://www.aviationweather.gov/adds/dataserver_current/httpparam?dataSource=metars&requestType=retrieve&format=xml&hoursBeforeNow=5&mostRecentForEachStation=true&stationString=" + ",".join([item for item in airports if item != "NULL"])
print(url)
content = urllib.request.urlopen(url).read()
# Retrieve flying conditions from the service response and store in a dictionary for each airport
root = ET.fromstring(content)
conditionDict = { "": {"flightCategory" : "", "windSpeed" : 0, "windGust" : False, "lightning": False } }
for metar in root.iter('METAR'):
stationId = metar.find('station_id').text
if metar.find('flight_category') is None:
print("Missing flight condition, skipping.")
continue
flightCategory = metar.find('flight_category').text
windGust = False
windSpeed = 0
lightning = False
if metar.find('wind_gust_kt') is not None:
windGust = (True if (ALWAYS_BLINK_FOR_GUSTS or int(metar.find('wind_gust_kt').text) > WIND_BLINK_THRESHOLD) else False)
if metar.find('wind_speed_kt') is not None:
windSpeed = int(metar.find('wind_speed_kt').text)
if metar.find('raw_text') is not None:
rawText = metar.find('raw_text').text
lightning = False if rawText.find('LTG') == -1 else True
print(stationId + ":" + flightCategory + ":" + str(windSpeed) + ":" + str(windGust) + ":" + str(lightning))
conditionDict[stationId] = { "flightCategory" : flightCategory, "windSpeed" : windSpeed, "windGust": windGust, "lightning": lightning }
# Setting LED colors based on weather conditions
windCycle = False
while True:
i = 0
for airportcode in airports:
# Skip NULL entries
if airportcode == "NULL":
i += 1
continue
color = COLOR_CLEAR
conditions = conditionDict.get(airportcode, None)
if conditions != None:
windy = True if (ACTIVATE_WINDCONDITION_ANIMATION and windCycle == True and (conditions["windSpeed"] > WIND_BLINK_THRESHOLD or conditions["windGust"] == True)) else False
lightningConditions = True if (ACTIVATE_LIGHTNING_ANIMATION and windCycle == False and conditions["lightning"] == True) else False
if conditions["flightCategory"] == "VFR":
color = COLOR_VFR if not (windy or lightningConditions) else COLOR_LIGHTNING if lightningConditions else (COLOR_VFR_FADE if FADE_INSTEAD_OF_BLINK else COLOR_CLEAR) if windy else COLOR_CLEAR
elif conditions["flightCategory"] == "MVFR":
color = COLOR_MVFR if not (windy or lightningConditions) else COLOR_LIGHTNING if lightningConditions else (COLOR_MVFR_FADE if FADE_INSTEAD_OF_BLINK else COLOR_CLEAR) if windy else COLOR_CLEAR
elif conditions["flightCategory"] == "IFR":
color = COLOR_IFR if not (windy or lightningConditions) else COLOR_LIGHTNING if lightningConditions else (COLOR_IFR_FADE if FADE_INSTEAD_OF_BLINK else COLOR_CLEAR) if windy else COLOR_CLEAR
elif conditions["flightCategory"] == "LIFR":
color = COLOR_LIFR if not (windy or lightningConditions) else COLOR_LIGHTNING if lightningConditions else (COLOR_LIFR_FADE if FADE_INSTEAD_OF_BLINK else COLOR_CLEAR) if windy else COLOR_CLEAR
else:
color = COLOR_CLEAR
#print("Setting LED " + str(i) + " for " + airportcode + " to " + ("lightning " if lightningConditions else "") + ("windy " if windy else "") + (conditions["flightCategory"] if conditions != None else "None") + " " + str(color))
pixels[i] = color
i += 1
# Update actual LEDs all at once
pixels.show()
# Switching between animation cycles
time.sleep(BLINK_SPEED)
    windCycle = not windCycle
print()
print("Done")
| 47.415254
| 243
| 0.741912
|
794a727be486d16611dbdc87189210ccc4ce986d
| 19800
|
py
|
Python
|
ml-agents/mlagents/trainers/tests/test_ppo.py
|
MisterPiggy/ml-agents
|
ab0336244d757745312c3077814064d40fb0b0e8
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/mlagents/trainers/tests/test_ppo.py
|
MisterPiggy/ml-agents
|
ab0336244d757745312c3077814064d40fb0b0e8
|
[
"Apache-2.0"
] | 4
|
2020-01-10T19:44:04.000Z
|
2021-05-21T16:06:01.000Z
|
ml-agents/mlagents/trainers/tests/test_ppo.py
|
kayloshai/MachineLearning
|
c9385d0db79665449af6c7566d9bb4da2434d8ab
|
[
"MIT"
] | 1
|
2021-09-02T07:21:57.000Z
|
2021-09-02T07:21:57.000Z
|
from unittest import mock
import pytest
import numpy as np
from mlagents.tf_utils import tf
import yaml
from mlagents.trainers.ppo.models import PPOModel
from mlagents.trainers.ppo.trainer import PPOTrainer, discount_rewards
from mlagents.trainers.ppo.policy import PPOPolicy
from mlagents.trainers.models import EncoderType, LearningModel
from mlagents.trainers.trainer import UnityTrainerException
from mlagents.trainers.brain import BrainParameters, CameraResolution
from mlagents.trainers.agent_processor import AgentManagerQueue
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.mock_communicator import MockCommunicator
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.mock_brain import make_brain_parameters
from mlagents.trainers.tests.test_trajectory import make_fake_trajectory
from mlagents.trainers.brain_conversion_utils import (
step_result_to_brain_info,
group_spec_to_brain_parameters,
)
@pytest.fixture
def dummy_config():
return yaml.safe_load(
"""
trainer: ppo
batch_size: 32
beta: 5.0e-3
buffer_size: 512
epsilon: 0.2
hidden_units: 128
lambd: 0.95
learning_rate: 3.0e-4
max_steps: 5.0e4
normalize: true
num_epoch: 5
num_layers: 2
time_horizon: 64
sequence_length: 64
summary_freq: 1000
use_recurrent: false
memory_size: 8
curiosity_strength: 0.0
curiosity_enc_size: 1
summary_path: test
model_path: test
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
"""
)
VECTOR_ACTION_SPACE = [2]
VECTOR_OBS_SPACE = 8
DISCRETE_ACTION_SPACE = [3, 3, 3, 2]
BUFFER_INIT_SAMPLES = 32
NUM_AGENTS = 12
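# The constants above size the mock brains and environments used throughout these tests.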
@mock.patch("mlagents_envs.environment.UnityEnvironment.executable_launcher")
@mock.patch("mlagents_envs.environment.UnityEnvironment.get_communicator")
def test_ppo_policy_evaluate(mock_communicator, mock_launcher, dummy_config):
tf.reset_default_graph()
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=0
)
env = UnityEnvironment(" ")
env.reset()
brain_name = env.get_agent_groups()[0]
brain_info = step_result_to_brain_info(
env.get_step_result(brain_name), env.get_agent_group_spec(brain_name)
)
brain_params = group_spec_to_brain_parameters(
brain_name, env.get_agent_group_spec(brain_name)
)
trainer_parameters = dummy_config
model_path = brain_name
trainer_parameters["model_path"] = model_path
trainer_parameters["keep_checkpoints"] = 3
policy = PPOPolicy(0, brain_params, trainer_parameters, False, False)
run_out = policy.evaluate(brain_info)
assert run_out["action"].shape == (3, 2)
env.close()
@mock.patch("mlagents_envs.environment.UnityEnvironment.executable_launcher")
@mock.patch("mlagents_envs.environment.UnityEnvironment.get_communicator")
def test_ppo_get_value_estimates(mock_communicator, mock_launcher, dummy_config):
tf.reset_default_graph()
brain_params = BrainParameters(
brain_name="test_brain",
vector_observation_space_size=1,
camera_resolutions=[],
vector_action_space_size=[2],
vector_action_descriptions=[],
vector_action_space_type=0,
)
dummy_config["summary_path"] = "./summaries/test_trainer_summary"
dummy_config["model_path"] = "./models/test_trainer_models/TestModel"
policy = PPOPolicy(0, brain_params, dummy_config, False, False)
time_horizon = 15
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
vec_obs_size=1,
num_vis_obs=0,
action_space=2,
)
run_out = policy.get_value_estimates(trajectory.next_obs, "test_agent", done=False)
for key, val in run_out.items():
assert type(key) is str
assert type(val) is float
run_out = policy.get_value_estimates(trajectory.next_obs, "test_agent", done=True)
for key, val in run_out.items():
assert type(key) is str
assert val == 0.0
# Check if we ignore terminal states properly
policy.reward_signals["extrinsic"].use_terminal_states = False
run_out = policy.get_value_estimates(trajectory.next_obs, "test_agent", done=True)
for key, val in run_out.items():
assert type(key) is str
assert val != 0.0
agentbuffer = trajectory.to_agentbuffer()
batched_values = policy.get_batched_value_estimates(agentbuffer)
for values in batched_values.values():
assert len(values) == 15
def test_ppo_model_cc_vector():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
model = PPOModel(
make_brain_parameters(discrete_action=False, visual_inputs=0)
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.log_probs,
model.value,
model.entropy,
model.learning_rate,
]
feed_dict = {
model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.epsilon: np.array([[0, 1], [2, 3]]),
}
sess.run(run_list, feed_dict=feed_dict)
def test_ppo_model_cc_visual():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
model = PPOModel(
make_brain_parameters(discrete_action=False, visual_inputs=2)
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.log_probs,
model.value,
model.entropy,
model.learning_rate,
]
feed_dict = {
model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.visual_in[0]: np.ones([2, 40, 30, 3], dtype=np.float32),
model.visual_in[1]: np.ones([2, 40, 30, 3], dtype=np.float32),
model.epsilon: np.array([[0, 1], [2, 3]], dtype=np.float32),
}
sess.run(run_list, feed_dict=feed_dict)
def test_ppo_model_dc_visual():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
model = PPOModel(
make_brain_parameters(discrete_action=True, visual_inputs=2)
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.all_log_probs,
model.value,
model.entropy,
model.learning_rate,
]
feed_dict = {
model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.visual_in[0]: np.ones([2, 40, 30, 3], dtype=np.float32),
model.visual_in[1]: np.ones([2, 40, 30, 3], dtype=np.float32),
model.action_masks: np.ones([2, 2], dtype=np.float32),
}
sess.run(run_list, feed_dict=feed_dict)
def test_ppo_model_dc_vector():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
model = PPOModel(
make_brain_parameters(discrete_action=True, visual_inputs=0)
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.all_log_probs,
model.value,
model.entropy,
model.learning_rate,
]
feed_dict = {
model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.action_masks: np.ones([2, 2], dtype=np.float32),
}
sess.run(run_list, feed_dict=feed_dict)
def test_ppo_model_dc_vector_rnn():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
memory_size = 128
model = PPOModel(
make_brain_parameters(discrete_action=True, visual_inputs=0),
use_recurrent=True,
m_size=memory_size,
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.all_log_probs,
model.value,
model.entropy,
model.learning_rate,
model.memory_out,
]
feed_dict = {
model.batch_size: 1,
model.sequence_length: 2,
model.prev_action: [[0], [0]],
model.memory_in: np.zeros((1, memory_size), dtype=np.float32),
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.action_masks: np.ones([1, 2], dtype=np.float32),
}
sess.run(run_list, feed_dict=feed_dict)
def test_ppo_model_cc_vector_rnn():
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
memory_size = 128
model = PPOModel(
make_brain_parameters(discrete_action=False, visual_inputs=0),
use_recurrent=True,
m_size=memory_size,
)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [
model.output,
model.all_log_probs,
model.value,
model.entropy,
model.learning_rate,
model.memory_out,
]
feed_dict = {
model.batch_size: 1,
model.sequence_length: 2,
model.memory_in: np.zeros((1, memory_size), dtype=np.float32),
model.vector_in: np.array([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]),
model.epsilon: np.array([[0, 1]]),
}
sess.run(run_list, feed_dict=feed_dict)
def test_rl_functions():
rewards = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
gamma = 0.9
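    # Working backwards with G_t = r_t + gamma * G_{t+1} and a terminal value of 0:
    # G_3 = 1.0, G_2 = 0.9, G_1 = 0.81, G_0 = 0.729.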
returns = discount_rewards(rewards, gamma, 0.0)
np.testing.assert_array_almost_equal(
returns, np.array([0.729, 0.81, 0.9, 1.0], dtype=np.float32)
)
def test_trainer_increment_step(dummy_config):
trainer_params = dummy_config
brain_params = BrainParameters(
brain_name="test_brain",
vector_observation_space_size=1,
camera_resolutions=[],
vector_action_space_size=[2],
vector_action_descriptions=[],
vector_action_space_type=0,
)
trainer = PPOTrainer(
brain_params.brain_name, 0, trainer_params, True, False, 0, "0", False
)
policy_mock = mock.Mock(spec=PPOPolicy)
    step_count = 5  # hard-coded; this function is no longer called through the trainer
policy_mock.increment_step = mock.Mock(return_value=step_count)
trainer.add_policy("testbehavior", policy_mock)
trainer._increment_step(5, "testbehavior")
policy_mock.increment_step.assert_called_with(5)
assert trainer.step == step_count
@mock.patch("mlagents_envs.environment.UnityEnvironment")
@pytest.mark.parametrize("use_discrete", [True, False])
def test_trainer_update_policy(mock_env, dummy_config, use_discrete):
env, mock_brain, _ = mb.setup_mock_env_and_brains(
mock_env,
use_discrete,
False,
num_agents=NUM_AGENTS,
vector_action_space=VECTOR_ACTION_SPACE,
vector_obs_space=VECTOR_OBS_SPACE,
discrete_action_space=DISCRETE_ACTION_SPACE,
)
trainer_params = dummy_config
trainer_params["use_recurrent"] = True
# Test curiosity reward signal
trainer_params["reward_signals"]["curiosity"] = {}
trainer_params["reward_signals"]["curiosity"]["strength"] = 1.0
trainer_params["reward_signals"]["curiosity"]["gamma"] = 0.99
trainer_params["reward_signals"]["curiosity"]["encoding_size"] = 128
trainer = PPOTrainer(
mock_brain.brain_name, 0, trainer_params, True, False, 0, "0", False
)
policy = trainer.create_policy(mock_brain)
trainer.add_policy(mock_brain.brain_name, policy)
# Test update with sequence length smaller than batch size
buffer = mb.simulate_rollout(env, trainer.policy, BUFFER_INIT_SAMPLES)
# Mock out reward signal eval
buffer["extrinsic_rewards"] = buffer["rewards"]
buffer["extrinsic_returns"] = buffer["rewards"]
buffer["extrinsic_value_estimates"] = buffer["rewards"]
buffer["curiosity_rewards"] = buffer["rewards"]
buffer["curiosity_returns"] = buffer["rewards"]
buffer["curiosity_value_estimates"] = buffer["rewards"]
trainer.update_buffer = buffer
trainer._update_policy()
# Make batch length a larger multiple of sequence length
trainer.trainer_parameters["batch_size"] = 128
trainer._update_policy()
# Make batch length a larger non-multiple of sequence length
trainer.trainer_parameters["batch_size"] = 100
trainer._update_policy()
def test_process_trajectory(dummy_config):
brain_params = BrainParameters(
brain_name="test_brain",
vector_observation_space_size=1,
camera_resolutions=[],
vector_action_space_size=[2],
vector_action_descriptions=[],
vector_action_space_type=0,
)
dummy_config["summary_path"] = "./summaries/test_trainer_summary"
dummy_config["model_path"] = "./models/test_trainer_models/TestModel"
    trainer = PPOTrainer(
        brain_params.brain_name, 0, dummy_config, True, False, 0, "0", False
    )
policy = trainer.create_policy(brain_params)
trainer.add_policy(brain_params.brain_name, policy)
trajectory_queue = AgentManagerQueue("testbrain")
trainer.subscribe_trajectory_queue(trajectory_queue)
time_horizon = 15
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
vec_obs_size=1,
num_vis_obs=0,
action_space=2,
)
trajectory_queue.put(trajectory)
trainer.advance()
# Check that trainer put trajectory in update buffer
assert trainer.update_buffer.num_experiences == 15
# Check that GAE worked
assert (
"advantages" in trainer.update_buffer
and "discounted_returns" in trainer.update_buffer
)
# Check that the stats are being collected as episode isn't complete
for reward in trainer.collected_rewards.values():
for agent in reward.values():
assert agent > 0
# Add a terminal trajectory
trajectory = make_fake_trajectory(
length=time_horizon + 1,
max_step_complete=False,
vec_obs_size=1,
num_vis_obs=0,
action_space=2,
)
trajectory_queue.put(trajectory)
trainer.advance()
# Check that the stats are reset as episode is finished
for reward in trainer.collected_rewards.values():
for agent in reward.values():
assert agent == 0
assert trainer.stats_reporter.get_stats_summaries("Policy/Extrinsic Reward").num > 0
def test_normalization(dummy_config):
brain_params = BrainParameters(
brain_name="test_brain",
vector_observation_space_size=1,
camera_resolutions=[],
vector_action_space_size=[2],
vector_action_descriptions=[],
vector_action_space_type=0,
)
dummy_config["summary_path"] = "./summaries/test_trainer_summary"
dummy_config["model_path"] = "./models/test_trainer_models/TestModel"
trainer = PPOTrainer(
brain_params.brain_name, 0, dummy_config, True, False, 0, "0", False
)
time_horizon = 6
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
vec_obs_size=1,
num_vis_obs=0,
action_space=2,
)
# Change half of the obs to 0
for i in range(3):
trajectory.steps[i].obs[0] = np.zeros(1, dtype=np.float32)
policy = trainer.create_policy(brain_params)
trainer.add_policy(brain_params.brain_name, policy)
trainer._process_trajectory(trajectory)
# Check that the running mean and variance is correct
steps, mean, variance = trainer.policy.sess.run(
[
trainer.policy.model.normalization_steps,
trainer.policy.model.running_mean,
trainer.policy.model.running_variance,
]
)
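    # Observations so far: three 0's and three 1's, so the running mean is 0.5.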
assert steps == 6
assert mean[0] == 0.5
# Note: variance is divided by number of steps, and initialized to 1 to avoid
# divide by 0. The right answer is 0.25
assert (variance[0] - 1) / steps == 0.25
# Make another update, this time with all 1's
time_horizon = 10
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
vec_obs_size=1,
num_vis_obs=0,
action_space=2,
)
trainer._process_trajectory(trajectory)
# Check that the running mean and variance is correct
steps, mean, variance = trainer.policy.sess.run(
[
trainer.policy.model.normalization_steps,
trainer.policy.model.running_mean,
trainer.policy.model.running_variance,
]
)
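    # Now 3 zeros and 13 ones over 16 steps: mean = 13/16 = 0.8125 and the
    # population variance is 13/16 - (13/16)^2 ~= 0.152.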
assert steps == 16
assert mean[0] == 0.8125
assert (variance[0] - 1) / steps == pytest.approx(0.152, abs=0.01)
def test_min_visual_size():
    # Make sure each EncoderType has an entry in MIN_RESOLUTION_FOR_ENCODER
assert set(LearningModel.MIN_RESOLUTION_FOR_ENCODER.keys()) == set(EncoderType)
for encoder_type in EncoderType:
with tf.Graph().as_default():
good_size = LearningModel.MIN_RESOLUTION_FOR_ENCODER[encoder_type]
good_res = CameraResolution(
width=good_size, height=good_size, num_channels=3
)
LearningModel._check_resolution_for_encoder(good_res, encoder_type)
vis_input = LearningModel.create_visual_input(
good_res, "test_min_visual_size"
)
enc_func = LearningModel.get_encoder_for_type(encoder_type)
enc_func(vis_input, 32, LearningModel.swish, 1, "test", False)
# Anything under the min size should raise an exception. If not, decrease the min size!
with pytest.raises(Exception):
with tf.Graph().as_default():
bad_size = LearningModel.MIN_RESOLUTION_FOR_ENCODER[encoder_type] - 1
bad_res = CameraResolution(
width=bad_size, height=bad_size, num_channels=3
)
with pytest.raises(UnityTrainerException):
# Make sure we'd hit a friendly error during model setup time.
LearningModel._check_resolution_for_encoder(bad_res, encoder_type)
vis_input = LearningModel.create_visual_input(
bad_res, "test_min_visual_size"
)
enc_func = LearningModel.get_encoder_for_type(encoder_type)
enc_func(vis_input, 32, LearningModel.swish, 1, "test", False)
if __name__ == "__main__":
pytest.main()
| 35.231317
| 95
| 0.634293
|
794a7338289c8245c01ea0e624f0c6b026971643
| 11636
|
py
|
Python
|
insta/views.py
|
careymwarabu/Insta-Piktures
|
3bbac8d89f4badbb67bfac6d9fb96fd85128704a
|
[
"MIT"
] | null | null | null |
insta/views.py
|
careymwarabu/Insta-Piktures
|
3bbac8d89f4badbb67bfac6d9fb96fd85128704a
|
[
"MIT"
] | null | null | null |
insta/views.py
|
careymwarabu/Insta-Piktures
|
3bbac8d89f4badbb67bfac6d9fb96fd85128704a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from .models import Image, Profile, Follow, Comment
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse
from .forms import CreateProfileForm, UploadImageForm, EditBioForm, FollowForm, UnfollowForm
from django.contrib.auth.decorators import login_required
from .email import send_welcome_email
# Create your views here.
@login_required(login_url='/accounts/login/')
def index(request):
    current_user = request.user
    try:
        logged_in = Profile.objects.get(user=current_user)
    except Profile.DoesNotExist:
        raise Http404()
    timeline_images = []
    current_images = Image.objects.filter(profile=logged_in)
    for current_image in current_images:
        timeline_images.append(current_image.id)
    current_following = Follow.objects.filter(follower=logged_in)
    for following in current_following:
        following_profile = following.followed
        following_images = Image.get_profile_images(following_profile)
        for image in following_images:
            timeline_images.append(image.id)
    display_images = Image.objects.filter(pk__in=timeline_images).order_by('-post_date')
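    # display_images now holds the user's own posts plus posts from everyone
    # they follow, newest first.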
    liked = False
    for image in display_images:
        # Only the flag from the last image survives this loop; the template
        # receives a single value.
        liked = image.likes.filter(id=request.user.id).exists()
comments = Comment.objects.all()[:3]
    comments_count = comments.count()
suggestions = Profile.objects.all()[:4]
print("SUGGESTED")
print(suggestions[0])
return render(request, 'index.html', {"images":display_images,"current_user": current_user, "liked":liked, "comments":comments, "suggestions":suggestions, "logged_in":logged_in})
#comment function
def comment(request, image_id):
image = Image.objects.get(pk=image_id)
content = request.GET.get("comment")
user = request.user
    comment = Comment(image=image, content=content, user=user)
comment.save_comment()
return redirect('/')
#liking an image
def like_image(request,image_id):
image = Image.objects.get(pk=image_id)
liked = False
current_user = request.user
    try:
        profile = Profile.objects.get(user=current_user)
    except Profile.DoesNotExist:
        raise Http404()
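    # Toggle semantics: remove the like if this profile already liked the
    # image, otherwise add it.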
if image.likes.filter(id=profile.id).exists():
image.likes.remove(profile)
liked = False
else:
image.likes.add(profile)
liked = True
return redirect('/')
#creating a profile
@login_required(login_url='/accounts/login/')
def create_profile(request):
current_user = request.user
if request.method == 'POST':
form = CreateProfileForm(request.POST,request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return HttpResponseRedirect('/')
else:
form = CreateProfileForm()
return render(request, 'create.html', {"form": form})
#email sign up
@login_required(login_url='/accounts/login/')
def email(request):
current_user = request.user
email = current_user.email
name = current_user.username
send_welcome_email(name, email)
return redirect(create_profile)
#search view function
def search(request):
if "user" in request.GET and request.GET["user"]:
searched_user = request.GET.get("user")
        try:
            user = Profile.search_user(searched_user)
            profile_id = user[0].id
            title = user[0].username
        except (User.DoesNotExist, IndexError):
            # An empty result raises IndexError rather than DoesNotExist
            raise Http404()
        current_user = request.user
        try:
            profile = Profile.objects.get(id=profile_id)
        except Profile.DoesNotExist:
            raise Http404()
        try:
            profile_following = Profile.objects.get(user=current_user)
        except Profile.DoesNotExist:
            raise Http404()
        try:
            profile_followed = Profile.objects.get(id=profile_id)
        except Profile.DoesNotExist:
            raise Http404()
if request.method == 'POST':
if 'follow' in request.POST:
form = FollowForm(request.POST)
if form.is_valid():
this_follow = form.save(commit=False)
this_follow.followed=profile_followed
this_follow.follower=profile_following
this_follow.save()
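                    # Recount both sides of the relationship and persist the
                    # denormalized follower/following totals on each profile.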
                    set_of_followers = Follow.objects.filter(followed=profile_followed)
                    profile_followed.followers = set_of_followers.count()
                    profile_followed.save()
                    set_of_following = Follow.objects.filter(follower=profile_following)
                    profile_following.following = set_of_following.count()
                    profile_following.save()
return HttpResponseRedirect(f'/profile/{profile_id}')
elif 'unfollow' in request.POST:
form = UnfollowForm(request.POST)
if form.is_valid():
                    Follow.objects.filter(followed=profile_followed, follower=profile_following).delete()
                    set_of_followers = Follow.objects.filter(followed=profile_followed)
                    profile_followed.followers = set_of_followers.count()
                    profile_followed.save()
                    set_of_following = Follow.objects.filter(follower=profile_following)
                    profile_following.following = set_of_following.count()
                    profile_following.save()
return HttpResponseRedirect(f'/profile/{profile_id}')
else:
form_follow = FollowForm()
form_unfollow = UnfollowForm()
        images = Image.objects.filter(profile=profile).order_by('-post_date')
posts = images.count()
        is_following = Follow.objects.filter(followed=profile_followed, follower=profile_following)
comments = Comment.objects.order_by('-post_date')
if is_following:
return render(request, 'profile/profile.html', {"profile": profile, "images": images, "comments":comments, "unfollow_form": form_unfollow, "posts": posts, "title": title})
return render(request, 'profile/profile.html', {"profile": profile, "images": images, "comments":comments, "follow_form": form_follow, "posts": posts, "title": title, "search":searched_user})
else:
no_search="You did not search for any user"
return render(request, 'profile/profile.html',{"no_search":no_search})
#profile
@login_required(login_url='/accounts/login/')
def profile(request, profile_id):
title = "Profile"
current_user = request.user
    try:
        profile = Profile.objects.get(id=profile_id)
    except Profile.DoesNotExist:
        raise Http404()
    try:
        profile_following = Profile.objects.get(user=current_user)
    except Profile.DoesNotExist:
        raise Http404()
    try:
        profile_followed = Profile.objects.get(id=profile_id)
    except Profile.DoesNotExist:
        raise Http404()
if request.method == 'POST':
if 'follow' in request.POST:
form = FollowForm(request.POST)
if form.is_valid():
this_follow = form.save(commit=False)
this_follow.followed=profile_followed
this_follow.follower=profile_following
this_follow.save()
                set_of_followers = Follow.objects.filter(followed=profile_followed)
                profile_followed.followers = set_of_followers.count()
                profile_followed.save()
                set_of_following = Follow.objects.filter(follower=profile_following)
                profile_following.following = set_of_following.count()
                profile_following.save()
return HttpResponseRedirect(f'/profile/{profile_id}')
elif 'unfollow' in request.POST:
form = UnfollowForm(request.POST)
if form.is_valid():
                Follow.objects.filter(followed=profile_followed, follower=profile_following).delete()
                set_of_followers = Follow.objects.filter(followed=profile_followed)
                profile_followed.followers = set_of_followers.count()
                profile_followed.save()
                set_of_following = Follow.objects.filter(follower=profile_following)
                profile_following.following = set_of_following.count()
                profile_following.save()
return HttpResponseRedirect(f'/profile/{profile_id}')
else:
form_follow = FollowForm()
form_unfollow = UnfollowForm()
    images = Image.objects.filter(profile=profile).order_by('-post_date')
posts = images.count()
    is_following = Follow.objects.filter(followed=profile_followed, follower=profile_following)
comments = Comment.objects.order_by('-post_date')
if is_following:
return render(request, 'profile/profile.html', {"profile": profile, "images": images, "comments":comments, "unfollow_form": form_unfollow, "posts": posts, "title": title})
return render(request, 'profile/profile.html', {"profile": profile, "images": images, "comments":comments, "follow_form": form_follow, "posts": posts, "title": title})
#uploading an image
@login_required(login_url='/accounts/login/')
def upload_image(request):
title = "Instagram | Upload image"
current_user = request.user
    try:
        profile = Profile.objects.get(user=current_user)
    except Profile.DoesNotExist:
        raise Http404()
if request.method == "POST":
form = UploadImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.profile = profile
image.save()
return redirect('/')
else:
form = UploadImageForm()
return render(request, 'upload_image.html', {"form": form, "title": title})
#editing a profile
def profile_edit(request):
current_user = request.user
if request.method == "POST":
form = EditBioForm(request.POST, request.FILES)
if form.is_valid():
profile_pic = form.cleaned_data['profile_pic']
bio = form.cleaned_data['bio']
            updated_profile = Profile.objects.get(user=current_user)
updated_profile.profile_pic = profile_pic
updated_profile.bio = bio
updated_profile.save()
return redirect('profile')
else:
form = EditBioForm()
return render(request, 'profile/profile.html', {"form": form})
| 41.116608
| 199
| 0.645583
|
794a7361b2a4c141166333ffaec7fbe0cf711df7
| 178
|
py
|
Python
|
server/setup.py
|
jojo-31/peakdb
|
9c4dd1c1e10ce26f705b85b554e581119b07ea5f
|
[
"MIT"
] | null | null | null |
server/setup.py
|
jojo-31/peakdb
|
9c4dd1c1e10ce26f705b85b554e581119b07ea5f
|
[
"MIT"
] | null | null | null |
server/setup.py
|
jojo-31/peakdb
|
9c4dd1c1e10ce26f705b85b554e581119b07ea5f
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="peakdb",
version="0.0.1",
description=("A simple module."),
packages=find_packages(exclude=["tests"]),
)
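# A minimal sketch of the assumed dev workflow: from the server/ directory,
# `pip install -e .` installs the package in editable mode.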
| 22.25
| 46
| 0.674157
|