hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a086e0f5c78501d6401229240ce28b80f9a023b
| 14,057
|
py
|
Python
|
nox/virtualenv.py
|
dineshks1/nox
|
4e1842246d8b005e0dadf4b0f3e9d1123cefa651
|
[
"Apache-2.0"
] | null | null | null |
nox/virtualenv.py
|
dineshks1/nox
|
4e1842246d8b005e0dadf4b0f3e9d1123cefa651
|
[
"Apache-2.0"
] | null | null | null |
nox/virtualenv.py
|
dineshks1/nox
|
4e1842246d8b005e0dadf4b0f3e9d1123cefa651
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import re
import shutil
import sys
from socket import gethostbyname
from typing import Any, List, Mapping, Optional, Tuple, Union
import nox.command
import py
from nox.logger import logger
from . import _typing
# Problematic environment variables that are stripped from all commands inside
# of a virtualenv. See https://github.com/theacodes/nox/issues/44
_BLACKLISTED_ENV_VARS = frozenset(
    ["PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"]
)
# Cached OS name ("Windows", "Linux", "Darwin", ...) used for the
# platform-specific bin-directory and interpreter-lookup logic below.
_SYSTEM = platform.system()
class InterpreterNotFound(OSError):
    """Raised when a requested Python interpreter cannot be located."""

    def __init__(self, interpreter: str) -> None:
        message = "Python interpreter {} not found".format(interpreter)
        super().__init__(message)
        # Keep the requested interpreter string for callers that want to
        # report or retry it.
        self.interpreter = interpreter
class ProcessEnv:
    """An environment with a 'bin' directory and a set of 'env' vars.

    Args:
        bin_paths: Ordered list of directories prepended to ``PATH`` for
            commands run in this environment, or ``None`` when the
            environment has no dedicated bin directories.
        env: Extra environment variables layered on top of a copy of
            ``os.environ``.
    """

    # Does this environment provide any process isolation?
    is_sandboxed = False

    # Special programs that aren't included in the environment.
    allowed_globals = ()  # type: _typing.ClassVar[Tuple[Any, ...]]

    # FIX: the original annotations (`bin_paths: None`, `env: Mapping[...] = None`)
    # were wrong; both parameters are optional. Behavior is unchanged.
    def __init__(
        self,
        bin_paths: Optional[List[str]] = None,
        env: Optional[Mapping[str, str]] = None,
    ) -> None:
        self._bin_paths = bin_paths
        self.env = os.environ.copy()
        if env is not None:
            self.env.update(env)
        # Strip variables that would make pip refuse to run outside a
        # "real" virtualenv (see _BLACKLISTED_ENV_VARS above).
        for key in _BLACKLISTED_ENV_VARS:
            self.env.pop(key, None)
        if self.bin_paths:
            self.env["PATH"] = os.pathsep.join(
                self.bin_paths + [self.env.get("PATH", "")]
            )

    @property
    def bin_paths(self) -> Optional[List[str]]:
        """All bin directories of this environment, or ``None``."""
        return self._bin_paths

    @property
    def bin(self) -> Optional[str]:
        """The first bin directory for the virtualenv."""
        paths = self.bin_paths
        return paths[0] if paths is not None else None

    def create(self) -> bool:
        """Create the environment; must be implemented by subclasses."""
        raise NotImplementedError("ProcessEnv.create should be overwritten in subclass")
def locate_via_py(version: str) -> Optional[str]:
    """Find the Python executable using the Windows Launcher.

    This is based on :pep:397 which details that executing
    ``py.exe -{version}`` should execute python with the requested
    version. We then make the python process print out its full
    executable path which we use as the location for the version-
    specific Python interpreter.

    Args:
        version (str): The desired Python version to pass to ``py.exe``. Of the form
            ``X.Y`` or ``X.Y-32``. For example, a usage of the Windows Launcher might
            be ``py -3.6-32``.

    Returns:
        Optional[str]: The full executable path for the Python ``version``,
        if it is found.
    """
    launcher = py.path.local.sysfind("py")
    if launcher is None:
        return None
    probe = "import sys; print(sys.executable)"
    try:
        return launcher.sysexec("-" + version, "-c", probe).strip()
    except py.process.cmdexec.Error:
        # The launcher exists but does not know this version.
        return None
def locate_using_path_and_version(version: str) -> Optional[str]:
    """Check the PATH's python interpreter and return it if the version
    matches.

    On systems without version-named interpreters and with a missing
    launcher (which is the case on all Windows Anaconda installations),
    we search the PATH for a plain "python" interpreter and accept it
    if its reported version matches the requested one.

    Args:
        version (str): The desired Python version. Of the form ``X.Y``.

    Returns:
        Optional[str]: The full executable path for the Python ``version``,
        if it is found.
    """
    if not version:
        return None
    candidate = py.path.local.sysfind("python")
    if not candidate:
        return None
    wanted_prefix = "{}.".format(version)
    probe = "import platform; print(platform.python_version())"
    try:
        reported = candidate.sysexec("-c", probe).strip()
    except py.process.cmdexec.Error:
        return None
    if reported.startswith(wanted_prefix):
        return str(candidate)
    return None
def _clean_location(self: "Union[CondaEnv, VirtualEnv]") -> bool:
"""Deletes any existing path-based environment"""
if os.path.exists(self.location):
if self.reuse_existing:
return False
else:
shutil.rmtree(self.location)
return True
class PassthroughEnv(ProcessEnv):
    """Represents the environment used to run nox itself

    For now, this class is empty but it might contain tools to grasp some
    hints about the actual env.
    """
    @staticmethod
    def is_offline() -> bool:
        """As of now this is only used in conda_install"""
        # Delegates to CondaEnv: the connectivity check is identical for
        # both environment kinds.
        return CondaEnv.is_offline()  # pragma: no cover
class CondaEnv(ProcessEnv):
    """Conda environment management class.

    Args:
        location (str): The location on the filesystem where the conda environment
            should be created.
        interpreter (Optional[str]): The desired Python version. Of the form
            ``X.Y`` (e.g. ``3.5``), ``X.Y-32`` (Windows Launcher form),
            ``X.Y.Z`` (e.g. ``3.4.9``), ``pythonX.Y`` (e.g. ``python2.7``),
            or a filesystem path to a Python executable. If not specified,
            the currently running Python is used.
        reuse_existing (Optional[bool]): Flag indicating if the conda environment
            should be reused if it already exists at ``location``.
        venv_params (Any): Extra command-line arguments appended to the
            ``conda create`` invocation.
    """
    # Conda environments are isolated from the host interpreter.
    is_sandboxed = True
    # ``conda`` itself lives outside the environment's bin directories but
    # must remain callable.
    allowed_globals = ("conda",)
    def __init__(
        self,
        location: str,
        interpreter: Optional[str] = None,
        reuse_existing: bool = False,
        venv_params: Any = None,
    ):
        # location_name keeps the user-supplied (possibly relative) path for
        # log messages; location is the resolved absolute path.
        self.location_name = location
        self.location = os.path.abspath(location)
        self.interpreter = interpreter
        self.reuse_existing = reuse_existing
        self.venv_params = venv_params if venv_params else []
        super(CondaEnv, self).__init__()
    # Bind the module-level helper as a method (shared with VirtualEnv).
    _clean_location = _clean_location
    @property
    def bin_paths(self) -> List[str]:
        """Returns the location of the conda env's bin folder."""
        # see https://docs.anaconda.com/anaconda/user-guide/tasks/integration/python-path/#examples
        if _SYSTEM == "Windows":
            return [self.location, os.path.join(self.location, "Scripts")]
        else:
            return [os.path.join(self.location, "bin")]
    def create(self) -> bool:
        """Create the conda env.

        Returns:
            bool: True if a new environment was created, False when an
            existing one is being reused.
        """
        if not self._clean_location():
            logger.debug(
                "Re-using existing conda env at {}.".format(self.location_name)
            )
            return False
        cmd = [
            "conda",
            "create",
            "--yes",
            "--prefix",
            self.location,
            # Ensure the pip package is installed.
            "pip",
        ]
        cmd.extend(self.venv_params)
        # Pin a specific python version only when one was requested.
        if self.interpreter:
            python_dep = "python={}".format(self.interpreter)
        else:
            python_dep = "python"
        cmd.append(python_dep)
        logger.info(
            "Creating conda env in {} with {}".format(self.location_name, python_dep)
        )
        nox.command.run(cmd, silent=True, log=False)
        return True
    @staticmethod
    def is_offline() -> bool:
        """Return `True` if we are sure that the user is not able to connect to https://repo.anaconda.com.

        Since an HTTP proxy might be correctly configured for `conda` using the `.condarc` `proxy_servers` section,
        while not being correctly configured in the OS environment variables used by all other tools including python
        `urllib` or `requests`, we are basically not able to do much more than testing the DNS resolution.
        See details in this explanation: https://stackoverflow.com/a/62486343/7262247
        """
        try:
            # DNS resolution to detect situation (1) or (2).
            # NOTE(review): gethostbyname raises on failure rather than
            # returning None, so the `host is None` check is effectively
            # dead and offline detection relies on the except clause.
            host = gethostbyname("repo.anaconda.com")
            return host is None
        except:  # pragma: no cover # noqa E722
            return True
class VirtualEnv(ProcessEnv):
    """Virtualenv management class.

    Args:
        location (str): The location on the filesystem where the virtual environment
            should be created.
        interpreter (Optional[str]): The desired Python version. Of the form
            ``X.Y`` (e.g. ``3.5``), ``X.Y-32`` (Windows Launcher form),
            ``X.Y.Z`` (e.g. ``3.4.9``), ``pythonX.Y`` (e.g. ``python2.7``),
            or a filesystem path to a Python executable. If not specified,
            the currently running Python is used.
        reuse_existing (Optional[bool]): Flag indicating if the virtual environment
            should be reused if it already exists at ``location``.
        venv (bool): Use the stdlib ``venv`` module instead of ``virtualenv``.
        venv_params (Any): Extra command-line arguments for the creation command.
    """
    # Virtual environments are isolated from the host interpreter.
    is_sandboxed = True
    def __init__(
        self,
        location: str,
        interpreter: Optional[str] = None,
        reuse_existing: bool = False,
        *,
        venv: bool = False,
        venv_params: Any = None
    ):
        # location_name keeps the user-supplied (possibly relative) path for
        # log messages; location is the resolved absolute path.
        self.location_name = location
        self.location = os.path.abspath(location)
        self.interpreter = interpreter
        # Cache for interpreter resolution; a failed lookup is cached as the
        # exception itself so PATH is only searched once per interpreter.
        self._resolved = None  # type: Union[None, str, InterpreterNotFound]
        self.reuse_existing = reuse_existing
        self.venv_or_virtualenv = "venv" if venv else "virtualenv"
        self.venv_params = venv_params if venv_params else []
        super(VirtualEnv, self).__init__(env={"VIRTUAL_ENV": self.location})
    # Bind the module-level helper as a method (shared with CondaEnv).
    _clean_location = _clean_location
    @property
    def _resolved_interpreter(self) -> str:
        """Return the interpreter, appropriately resolved for the platform.

        Based heavily on tox's implementation (tox/interpreters.py).

        Raises:
            InterpreterNotFound: when no matching executable can be located.
        """
        # A previous failed resolution was cached as the exception; re-raise
        # instead of searching again.
        if isinstance(self._resolved, Exception):
            raise self._resolved
        if self._resolved is not None:
            return self._resolved
        # If there is no assigned interpreter, then use the same one used by
        # Nox.
        if self.interpreter is None:
            self._resolved = sys.executable
            return self._resolved
        # Otherwise we need to divine the path to the interpreter. This is
        # designed to accept strings in the form of "2", "2.7", "2.7.13",
        # "2.7.13-32", "python2", "python2.4", etc.
        xy_version = ""
        cleaned_interpreter = self.interpreter
        # If this is just a X, X.Y, or X.Y.Z string, extract just the X / X.Y
        # part and add Python to the front of it.
        match = re.match(r"^(?P<xy_ver>\d(\.\d+)?)(\.\d+)?$", self.interpreter)
        if match:
            xy_version = match.group("xy_ver")
            cleaned_interpreter = "python{}".format(xy_version)
        # If the cleaned interpreter is on the PATH, go ahead and return it.
        if py.path.local.sysfind(cleaned_interpreter):
            self._resolved = cleaned_interpreter
            return self._resolved
        # The rest of this is only applicable to Windows, so if we don't have
        # an interpreter by now, raise.
        if _SYSTEM != "Windows":
            self._resolved = InterpreterNotFound(self.interpreter)
            raise self._resolved
        # Allow versions of the form ``X.Y-32`` for Windows.
        # NOTE(review): the trailing "32?" makes only the "2" optional, so
        # strings like "3.8-3" also match; "(-32)" without "?" may have been
        # the intent -- confirm before changing.
        match = re.match(r"^\d\.\d+-32?$", cleaned_interpreter)
        if match:
            # preserve the "-32" suffix, as the Python launcher expects
            # it.
            xy_version = cleaned_interpreter
        # Try the Windows Launcher (py.exe) first, then fall back to probing
        # the plain "python" on PATH for a matching version.
        path_from_launcher = locate_via_py(xy_version)
        if path_from_launcher:
            self._resolved = path_from_launcher
            return self._resolved
        path_from_version_param = locate_using_path_and_version(xy_version)
        if path_from_version_param:
            self._resolved = path_from_version_param
            return self._resolved
        # If we got this far, then we were unable to resolve the interpreter
        # to an actual executable; raise an exception.
        self._resolved = InterpreterNotFound(self.interpreter)
        raise self._resolved
    @property
    def bin_paths(self) -> List[str]:
        """Returns the location of the virtualenv's bin folder."""
        if _SYSTEM == "Windows":
            return [os.path.join(self.location, "Scripts")]
        else:
            return [os.path.join(self.location, "bin")]
    def create(self) -> bool:
        """Create the virtualenv or venv.

        Returns:
            bool: True if a new environment was created, False when an
            existing one is being reused.
        """
        if not self._clean_location():
            logger.debug(
                "Re-using existing virtual environment at {}.".format(
                    self.location_name
                )
            )
            return False
        if self.venv_or_virtualenv == "virtualenv":
            cmd = [sys.executable, "-m", "virtualenv", self.location]
            if self.interpreter:
                # virtualenv can target a different interpreter via -p.
                cmd.extend(["-p", self._resolved_interpreter])
        else:
            # The stdlib venv module always uses the interpreter running it.
            cmd = [self._resolved_interpreter, "-m", "venv", self.location]
        cmd.extend(self.venv_params)
        logger.info(
            "Creating virtual environment ({}) using {} in {}".format(
                self.venv_or_virtualenv,
                os.path.basename(self._resolved_interpreter),
                self.location_name,
            )
        )
        nox.command.run(cmd, silent=True, log=False)
        return True
| 34.538084
| 117
| 0.620047
|
4a086e4088ec218f27b55cbbd66b843924cdf66d
| 40
|
py
|
Python
|
__init__.py
|
14kw/python-crowd
|
a6228ae13e787e5e477a0a42e7ba57b4c93aac24
|
[
"BSD-2-Clause"
] | 26
|
2016-05-16T18:19:26.000Z
|
2020-12-01T08:09:18.000Z
|
__init__.py
|
14kw/python-crowd
|
a6228ae13e787e5e477a0a42e7ba57b4c93aac24
|
[
"BSD-2-Clause"
] | 17
|
2016-05-01T09:21:43.000Z
|
2021-12-01T02:06:27.000Z
|
__init__.py
|
14kw/python-crowd
|
a6228ae13e787e5e477a0a42e7ba57b4c93aac24
|
[
"BSD-2-Clause"
] | 24
|
2016-04-29T16:31:54.000Z
|
2021-12-09T01:53:23.000Z
|
# Python 2 package shim: re-export everything from the ``crowd`` module at
# package level (implicit relative import, Python 2 only).
__all__ = ['crowd']
from crowd import *
| 13.333333
| 19
| 0.675
|
4a086e68c40d5c0842dc0305e416d50c16c08cc9
| 949
|
py
|
Python
|
src/zope/app/authentication/browser/adding.py
|
zopefoundation/zope.app.authentication
|
1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/app/authentication/browser/adding.py
|
zopefoundation/zope.app.authentication
|
1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00
|
[
"ZPL-2.1"
] | 4
|
2017-05-01T12:56:58.000Z
|
2021-01-13T07:35:20.000Z
|
src/zope/app/authentication/browser/adding.py
|
zopefoundation/zope.app.authentication
|
1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00
|
[
"ZPL-2.1"
] | 1
|
2015-04-03T07:28:05.000Z
|
2015-04-03T07:28:05.000Z
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adding that redirects to plugins.html.
$Id$
"""
import zope.app.container.browser.adding
from zope.traversing.browser.absoluteurl import absoluteURL
class Adding(zope.app.container.browser.adding.Adding):
    """Adding view whose post-add redirect lands on the container contents page."""

    def nextURL(self):
        """Return the URL to redirect to after an object has been added."""
        base = absoluteURL(self.context, self.request)
        return base + '/@@contents.html'
| 33.892857
| 78
| 0.632244
|
4a086ed9353707348eb423ee65796e02e70a87d0
| 3,113
|
py
|
Python
|
my_selenium_project/testcases/pytest/test_category.py
|
xinlc/selenium-learning
|
456f755a29272e8be94a38bff018433a5ff42204
|
[
"Apache-2.0"
] | null | null | null |
my_selenium_project/testcases/pytest/test_category.py
|
xinlc/selenium-learning
|
456f755a29272e8be94a38bff018433a5ff42204
|
[
"Apache-2.0"
] | null | null | null |
my_selenium_project/testcases/pytest/test_category.py
|
xinlc/selenium-learning
|
456f755a29272e8be94a38bff018433a5ff42204
|
[
"Apache-2.0"
] | null | null | null |
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from testcases.pytest.test_admin_login import TestAdminLogin
import pytest
class TestCategory(object):
    """Selenium UI tests for the article-category admin screens.

    NOTE(review): ``setup_class`` is declared with ``self``; pytest calls it
    with the class object, so ``self`` is actually the class here.
    """
    def setup_class(self):
        # Reuse the admin-login test object to obtain a logged-in driver.
        self.login = TestAdminLogin()
    # Adding a category must fail when the name is empty.
    @pytest.mark.dependency(depends=["admin_login"], scope="module")
    def test_add_category_error(self):
        name = ''
        parent = 'python'
        slug = 'test'
        expected = '分类名称不能为空'
        # Click "Articles" in the sidebar.
        self.login.driver.find_element_by_xpath('//*[@id="sidebar-menu"]/li[4]/a/span[1]').click()
        sleep(1)
        # Click "Categories".
        self.login.driver.find_element_by_xpath('//*[@id="sidebar-menu"]/li[4]/ul/li[3]/a').click()
        sleep(1)
        # Enter the category name (empty on purpose).
        self.login.driver.find_element_by_name('category.title').send_keys(name)
        # Choose the parent category.
        parent_category_elem = self.login.driver.find_element_by_name('category.pid')
        Select(parent_category_elem).select_by_visible_text(parent)
        # Enter the slug.
        self.login.driver.find_element_by_name('category.slug').send_keys(slug)
        # Click "Add".
        self.login.driver.find_element_by_xpath(
            '/html/body/div/div/section[2]/div/div[1]/div/form/div[2]/div/div/button').click()
        # Wait for the toast message and compare it to the expected error.
        loc = (By.CLASS_NAME, 'toast-message')
        WebDriverWait(self.login.driver, 5).until(EC.visibility_of_element_located(loc))
        msg = self.login.driver.find_element(*loc).text
        assert msg == expected
    # Adding a category with a valid name must succeed.
    @pytest.mark.dependency(depends=["admin_login"], scope="module")
    def test_add_category_ok(self):
        name = 'test'
        parent = 'python'
        slug = 'test'
        expected = None
        # The previous test already expanded "Articles", so go straight
        # to "Categories".
        # self.login.driver.find_element_by_xpath('//*[@id="sidebar-menu"]/li[4]/a/span[1]').click()
        sleep(1)
        # Click "Categories".
        self.login.driver.find_element_by_xpath('//*[@id="sidebar-menu"]/li[4]/ul/li[3]/a').click()
        sleep(1)
        # Clear and enter the category name.
        self.login.driver.find_element_by_name('category.title').clear()
        self.login.driver.find_element_by_name('category.title').send_keys(name)
        # Choose the parent category.
        parent_category_elem = self.login.driver.find_element_by_name('category.pid')
        Select(parent_category_elem).select_by_visible_text(parent)
        # Clear and enter the slug.
        self.login.driver.find_element_by_name('category.slug').clear()
        self.login.driver.find_element_by_name('category.slug').send_keys(slug)
        # Click "Add".
        self.login.driver.find_element_by_xpath(
            '/html/body/div/div/section[2]/div/div[1]/div/form/div[2]/div/div/button').click()
        # No exception means the add succeeded; success shows no toast.
        assert 1 == 1
    def runTest(self):
        # Run both cases in order when invoked outside of pytest collection.
        self.test_add_category_error()
        self.test_add_category_ok()
if __name__ == '__main__':
    # Running this file directly bypasses the cross-module login dependency.
    pytest.main(['test_category.py'])
| 32.092784
| 100
| 0.65628
|
4a086f568084b9d906d6be572cdf0ad9c8f02980
| 2,648
|
py
|
Python
|
thaniya_server/src/thaniya_server/jobs/JobProcessingCtx.py
|
jkpubsrc/Thaniya
|
4ebdf2854e3d7888af7396adffa22628b4ab2267
|
[
"Apache-1.1"
] | 1
|
2021-01-20T18:27:22.000Z
|
2021-01-20T18:27:22.000Z
|
thaniya_server/src/thaniya_server/jobs/JobProcessingCtx.py
|
jkpubsrc/Thaniya
|
4ebdf2854e3d7888af7396adffa22628b4ab2267
|
[
"Apache-1.1"
] | null | null | null |
thaniya_server/src/thaniya_server/jobs/JobProcessingCtx.py
|
jkpubsrc/Thaniya
|
4ebdf2854e3d7888af7396adffa22628b4ab2267
|
[
"Apache-1.1"
] | null | null | null |
import os
import typing
import jk_typing
import jk_utils
import jk_logging
from .EnumJobState import EnumJobState
from .Job import Job
class JobProcessingCtx(object):
    """Context object handed to job processors.

    Wraps a termination flag and a logger, and exposes any extra keyword
    arguments passed to the constructor as read-only attributes.
    """

    ################################################################################################################################
    ## Constructor
    ################################################################################################################################

    #
    # Constructor method.
    #
    # @param jk_utils.TerminationFlag terminationFlag Cooperative cancellation flag shared with the job runner.
    # @param jk_logging.AbstractLogger log Logger to use during job processing.
    # @param **kwargs Arbitrary extra values exposed as attributes on this context.
    #
    @jk_typing.checkFunctionSignature()
    def __init__(self, terminationFlag:jk_utils.TerminationFlag, log:jk_logging.AbstractLogger, **kwargs):
        # Reject keys that would shadow this class' own API.
        assert "terminate" not in kwargs
        assert "checkForTermination" not in kwargs
        assert "log" not in kwargs
        assert "terminationFlag" not in kwargs
        self.__data = kwargs
        self.__log = log
        self.__terminationFlag = terminationFlag
    #

    ################################################################################################################################
    ## Public Properties
    ################################################################################################################################

    @property
    def log(self) -> jk_logging.AbstractLogger:
        return self.__log
    #

    @property
    def terminationFlag(self) -> jk_utils.TerminationFlag:
        return self.__terminationFlag
    #

    ################################################################################################################################
    ## Public Methods
    ################################################################################################################################

    def __getattr__(self, name:str):
        # Fall back to the extra keyword arguments captured at construction.
        if name in self.__data:
            return self.__data[name]
        # BUGFIX: ``object`` defines no ``__getattr__``, so the original
        # ``super().__getattr__(name)`` raised a confusing AttributeError
        # about 'super'; raise a proper AttributeError for ``name`` instead.
        raise AttributeError(name)
    #

    def terminate(self):
        # Signal cooperative termination to anyone sharing the flag.
        self.__terminationFlag.terminate()
    #

    #
    # Check if the current activity is to be interrupted. In that case an InterruptedException is raised.
    #
    def checkForTermination(self):
        self.__terminationFlag.check()
    #

    ################################################################################################################################
    ## Static Methods
    ################################################################################################################################

#
| 25.219048
| 129
| 0.371979
|
4a087069afcb07fb0df52f1da69c8f145811d0aa
| 2,137
|
py
|
Python
|
apps/public/rosetta/create_score_json_from_scored_decoys.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T21:52:23.000Z
|
2019-12-23T21:52:23.000Z
|
apps/public/rosetta/create_score_json_from_scored_decoys.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | null | null | null |
apps/public/rosetta/create_score_json_from_scored_decoys.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 2
|
2021-11-13T01:34:15.000Z
|
2021-11-13T01:34:34.000Z
|
#!/usr/bin/env python3
import os,json,re,glob,sys
from argparse import ArgumentParser
from jade2.rosetta_jade.score_util import parse_decoy_scores
def get_pdbs(argu):
    """Resolve one command-line argument into a list of PDB file paths.

    Args:
        argu: A directory (all ``*.pdb*`` files inside are returned), a
            text file listing PDB paths one per line (``#`` comments
            skipped), or a PDB filename (returned as-is).

    Returns:
        list[str]: The PDB paths derived from ``argu``.
    """
    if os.path.isdir(argu):
        print("Gathering PDBs: " + argu)
        return glob.glob(argu + "/*.pdb*")
    # A regular file that is not itself a PDB is treated as a PDBLIST.
    # FIX: escape the dot -- the original pattern ".pdb" also matched names
    # like "Xpdb"; the extra ".pdb.gz" check was redundant (implied by ".pdb").
    if os.path.isfile(argu) and not re.search(r"\.pdb", argu):
        print("Parsing PDBs: " + argu)
        # FIX: close the file handle (the original leaked it).
        with open(argu, 'r') as list_file:
            return [x.strip() for x in list_file if x and not x.startswith('#')]
    return [argu]
def get_parser():
    """Construct the CLI argument parser for the score-extraction script."""
    parser = ArgumentParser(description="This script creates a Rosetta score file from a set of structures - by parsing the score from them. Pass a directory, a PDBLIST, and/or a list of filenames")
    parser.add_argument("--prefix", default="", help="Any prefix to use. ")
    parser.add_argument(
        "decoys",
        nargs="*",
        default=[],
        help="A directory, a PDBLIST, and/or a list of filenames",
    )
    return parser
if __name__ == "__main__":
parser = get_parser()
options = parser.parse_args()
#print(options)
if len(options.decoys) == 0:
sys.exit("Please pass decoys to parse score.")
decoys = []
for argu in options.decoys:
pdbs = get_pdbs(argu)
#print(pdbs)
decoys.extend(pdbs)
#print("\n".join(decoys))
if options.prefix:
OUTFILE = open(options.prefix+"score.json", 'w')
else:
OUTFILE = open(options.prefix + "score.json", 'w')
scores = []
decoy_num = 1
print("Reading",len(decoys), "decoys")
for decoy in decoys:
if decoy_num % 50 == 0:
print("Decoy",decoy_num)
score_dict = parse_decoy_scores(decoy)
if not score_dict:
print("decoy", decoy, "has no score")
if score_dict:
OUTFILE.write(json.dumps(score_dict, sort_keys=True)+"\n")
decoy_num+=1
#OUTFILE.write("\n".join(json.dumps(scores)))
OUTFILE.close()
print("Done")
| 28.493333
| 198
| 0.583528
|
4a0870cc037e6c4c0329880e3c7db4fa64f2a370
| 1,510
|
py
|
Python
|
tests/test_nutritionproduct.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_nutritionproduct.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_nutritionproduct.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/NutritionProduct
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from fhir.resources import fhirtypes # noqa: F401
from fhir.resources import nutritionproduct
def impl_nutritionproduct_1(inst):
    """Assert the fields of the parsed nutritionproduct-example resource."""
    assert inst.id == "example"
    first_tag = inst.meta.tag[0]
    assert first_tag.code == "HTEST"
    assert first_tag.display == "test health data"
    assert first_tag.system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
    assert inst.status == "active"
    expected_div = '<div xmlns="http://www.w3.org/1999/xhtml">[Put rendering here]</div>'
    assert inst.text.div == expected_div
    assert inst.text.status == "generated"
def test_nutritionproduct_1(base_settings):
    """No. 1 tests collection for NutritionProduct.

    Test File: nutritionproduct-example.json
    """
    filename = base_settings["unittest_data_dir"] / "nutritionproduct-example.json"
    parsed = nutritionproduct.NutritionProduct.parse_file(
        filename, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "NutritionProduct"
    impl_nutritionproduct_1(parsed)
    # Round-trip: serialize to a dict and rebuild the resource from it.
    payload = parsed.dict()
    assert payload["resourceType"] == "NutritionProduct"
    rebuilt = nutritionproduct.NutritionProduct(**payload)
    impl_nutritionproduct_1(rebuilt)
| 32.826087
| 87
| 0.706623
|
4a087171bb3b4a19d0b830d323f145ddc37ee9ff
| 1,297
|
py
|
Python
|
blqs/blqs/program_test.py
|
ionq/blqs
|
0667159d0475e0fc0df5aa607f87c8e90c38eb39
|
[
"Apache-2.0"
] | 1
|
2022-02-01T18:38:30.000Z
|
2022-02-01T18:38:30.000Z
|
blqs/blqs/program_test.py
|
ionq/blqs
|
0667159d0475e0fc0df5aa607f87c8e90c38eb39
|
[
"Apache-2.0"
] | null | null | null |
blqs/blqs/program_test.py
|
ionq/blqs
|
0667159d0475e0fc0df5aa607f87c8e90c38eb39
|
[
"Apache-2.0"
] | 1
|
2022-02-03T21:09:59.000Z
|
2022-02-03T21:09:59.000Z
|
# Copyright 2021 The Blqs Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymore
import pytest
import blqs
def test_program_str():
    """str() joins top-level statements with newlines and indents nested blocks."""
    flat = blqs.Program.of("a", "b")
    assert str(flat) == "a\nb"
    nested = blqs.Program.of(blqs.Block.of("a"), "b")
    assert str(nested) == "  a\nb"
def test_program_not_at_top_block_stack():
    # Creating a Program while a Block is still being built must fail:
    # a Program is only valid when the block-builder stack is empty.
    with blqs.Block():
        with pytest.raises(AssertionError, match="stack is empty"):
            blqs.Program()
def test_program_equality():
    # Programs compare equal iff their contained statements/blocks match.
    eq = pymore.EqualsTester()
    eq.add_equality_group(blqs.Program(), blqs.Program())
    eq.make_equality_group(lambda: blqs.Program.of("a"))
    eq.make_equality_group(lambda: blqs.Program.of("a", "b"))
    # A Program holding an empty Block is distinct from an empty Program.
    eq.add_equality_group(blqs.Program.of(blqs.Block.of()))
    eq.add_equality_group(blqs.Program.of(blqs.Block.of("a")))
| 34.131579
| 74
| 0.718581
|
4a0871c72264eeb1474c1c5d1666a62acfa2f170
| 3,139
|
py
|
Python
|
eggs/PasteScript-1.7.5-py2.7.egg/paste/script/cherrypy_server.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 1
|
2016-02-10T18:22:42.000Z
|
2016-02-10T18:22:42.000Z
|
eggs/PasteScript-1.7.5-py2.7.egg/paste/script/cherrypy_server.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 1
|
2016-04-19T13:03:17.000Z
|
2016-04-19T13:03:17.000Z
|
eggs/PasteScript-1.7.5-py2.7.egg/paste/script/cherrypy_server.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | null | null | null |
"""
Entry point for CherryPy's WSGI server
"""
# Python 2 module. Import CherryPy's WSGI server, printing an actionable
# hint (and re-raising) when the dependency is missing.
try:
    from cherrypy import wsgiserver
except ImportError:
    print '=' * 60
    print '== You must install CherryPy (pip install cherrypy) to use the egg:PasteScript#cherrypy server'
    print '=' * 60
    raise
def cpwsgi_server(app, global_conf=None, host='127.0.0.1', port=None,
                  ssl_pem=None, protocol_version=None, numthreads=None,
                  server_name=None, max=None, request_queue_size=None,
                  timeout=None):
    """
    Serves the specified WSGI app via CherryPyWSGIServer.

    ``app``
        The WSGI 'application callable'; multiple WSGI applications
        may be passed as (script_name, callable) pairs.

    ``host``
        This is the ipaddress to bind to (or a hostname if your
        nameserver is properly configured).  This defaults to
        127.0.0.1, which is not a public interface.

    ``port``
        The port to run on, defaults to 8080 for HTTP, or 4443 for
        HTTPS. This can be a string or an integer value.

    ``ssl_pem``
        This an optional SSL certificate file (via OpenSSL) You can
        generate a self-signed test PEM certificate file as follows:

            $ openssl genrsa 1024 > host.key
            $ chmod 400 host.key
            $ openssl req -new -x509 -nodes -sha1 -days 365  \\
                          -key host.key > host.cert
            $ cat host.cert host.key > host.pem
            $ chmod 400 host.pem

    ``protocol_version``
        The protocol used by the server, by default ``HTTP/1.1``.

    ``numthreads``
        The number of worker threads to create.

    ``server_name``
        The string to set for WSGI's SERVER_NAME environ entry.

    ``max``
        The maximum number of queued requests. (defaults to -1 = no
        limit).

    ``request_queue_size``
        The 'backlog' argument to socket.listen(); specifies the
        maximum number of queued connections.

    ``timeout``
        The timeout in seconds for accepted connections.
    """
    is_ssl = False
    if ssl_pem:
        # SSL implies the HTTPS default port when none was given.
        port = port or 4443
        is_ssl = True
    if not port:
        # Accept "host:port" in the host argument when no port was given.
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            port = 8080
    bind_addr = (host, int(port))
    kwargs = {}
    # Forward only the tuning options that were actually provided; they
    # are looked up by name via locals() to avoid repetitive if-blocks.
    for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):
        var = locals()[var_name]
        if var is not None:
            kwargs[var_name] = int(var)
    server = wsgiserver.CherryPyWSGIServer(bind_addr, app,
                                           server_name=server_name, **kwargs)
    # ssl_pem is None when SSL is unset, which disables SSL on the server.
    server.ssl_certificate = server.ssl_private_key = ssl_pem
    if protocol_version:
        server.protocol = protocol_version
    try:
        protocol = is_ssl and 'https' or 'http'
        if host == '0.0.0.0':
            # A wildcard bind is not directly browsable; show a usable URL.
            print 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % \
                (port, protocol, port)
        else:
            print "serving on %s://%s:%s" % (protocol, host, port)
        server.start()
    except (KeyboardInterrupt, SystemExit):
        # Graceful shutdown on Ctrl-C or interpreter exit.
        server.stop()
    return server
| 28.798165
| 106
| 0.587448
|
4a08722561740c7a7a16108d15fe1f70a0c4f8b0
| 1,298
|
py
|
Python
|
home_application/models.py
|
xuys1214/wyapp
|
1376ef4d0c3e40d07d4b9d78610b24c3143be548
|
[
"Apache-2.0"
] | 1
|
2020-02-19T08:40:39.000Z
|
2020-02-19T08:40:39.000Z
|
home_application/models.py
|
xuys1214/wyapp
|
1376ef4d0c3e40d07d4b9d78610b24c3143be548
|
[
"Apache-2.0"
] | 4
|
2020-02-12T03:13:26.000Z
|
2021-06-10T22:03:02.000Z
|
home_application/models.py
|
xuys1214/wyapp
|
1376ef4d0c3e40d07d4b9d78610b24c3143be548
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# from django.db import models
from django.db import models
class HostPerforms(models.Model):
ip = models.CharField(max_length=20)
bk_biz_id = models.IntegerField('业务ID')
bk_cloud_id = models.IntegerField('云区域ID')
create_time = models.DateTimeField('纳管时间', auto_now=True)
class HostPerformsUsage(models.Model):
ip = models.CharField(max_length=20)
mem_usage = models.CharField('内存使用率', max_length=10)
disk_usage = models.CharField('磁盘使用率', max_length=10)
cpu_usage = models.CharField('CPU使用率', max_length=10)
create_time = models.DateTimeField('录入时间', auto_now=True)
| 51.92
| 115
| 0.765023
|
4a08729e25ad1a422f1bba03dce3f782f8484a14
| 675
|
py
|
Python
|
model/solution_test.py
|
glstr/python_learning
|
243908d6f358764386f2e58dfbfde10a406d803c
|
[
"Apache-2.0"
] | 2
|
2018-09-20T06:08:00.000Z
|
2018-09-26T13:57:20.000Z
|
model/solution_test.py
|
glstr/python_learning
|
243908d6f358764386f2e58dfbfde10a406d803c
|
[
"Apache-2.0"
] | null | null | null |
model/solution_test.py
|
glstr/python_learning
|
243908d6f358764386f2e58dfbfde10a406d803c
|
[
"Apache-2.0"
] | 1
|
2019-03-25T05:53:32.000Z
|
2019-03-25T05:53:32.000Z
|
#!/usr/bin/python
# coding:utf-8
import unittest
import solution
class SolutionTest(unittest.TestCase):
def setUp(self):
self.solution = solution.Solution()
return
def test_add_two(self):
nums = [2, 7, 11, 15]
target = 9
res = self.solution.two_sum(nums, target)
expect = [0, 1]
self.assertEqual(res, expect)
def test_add_two_numbers(self):
l1 = solution.make_list([5])
l2 = solution.make_list([5])
res = self.solution.add_two_numbers(l1, l2)
array = solution.list_to_array(res)
self.assertEqual(array, [0, 1])
if __name__ == '__main__':
unittest.main()
| 21.774194
| 51
| 0.608889
|
4a0872c99c56da8fdf9c0d22e78f6a5d5773d43b
| 9,469
|
py
|
Python
|
metashare/settings.py
|
hpusset/ELRI
|
c4455cff3adb920627f014f37e740665342e9cee
|
[
"BSD-3-Clause"
] | null | null | null |
metashare/settings.py
|
hpusset/ELRI
|
c4455cff3adb920627f014f37e740665342e9cee
|
[
"BSD-3-Clause"
] | null | null | null |
metashare/settings.py
|
hpusset/ELRI
|
c4455cff3adb920627f014f37e740665342e9cee
|
[
"BSD-3-Clause"
] | null | null | null |
from os.path import abspath, dirname
ROOT_PATH = abspath(dirname(__file__))
import os
import sys
import logging
from logging.handlers import RotatingFileHandler
from django.core.urlresolvers import reverse_lazy
# Import local settings, i.e., DEBUG, TEMPLATE_DEBUG, TIME_ZONE,
# DATABASE_* settings, ADMINS, etc.
from local_settings import *
# Allows to disable check for duplicate instances.
CHECK_FOR_DUPLICATE_INSTANCES = True
# Logging settings for this Django project.
LOG_LEVEL = logging.INFO
LOG_FORMAT = "[%(asctime)s] %(name)s::%(levelname)s %(message)s"
LOG_DATE = "%m/%d/%Y @ %H:%M:%S"
LOG_FORMATTER = logging.Formatter(LOG_FORMAT, LOG_DATE)
# Allows to disable check for duplicate instances.
CHECK_FOR_DUPLICATE_INSTANCES = True
# work around a problem on non-posix-compliant platforms by not using any
# RotatingFileHandler there
if os.name == "posix":
LOG_HANDLER = RotatingFileHandler(filename=LOG_FILENAME, mode="a",
maxBytes=1024*1024, backupCount=5, encoding="utf-8")
else:
LOG_HANDLER = logging.FileHandler(filename=LOG_FILENAME, mode="a",
encoding="utf-8")
LOG_HANDLER.setLevel(level=LOG_LEVEL)
LOG_HANDLER.setFormatter(LOG_FORMATTER)
# init root logger
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE, level=LOG_LEVEL)
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Maximum size of files uploaded as resrouce data.
# The default is a cautious value in order to protect the server
# against resource starvation; if you think your server can handle
# bigger files, feel free to try and increase this value.
MAXIMUM_UPLOAD_SIZE = 400 * 1024 * 1024
# Synchronization info:
SYNC_NEEDS_AUTHENTICATION = True
# URL for the Metashare Knowledge Base
KNOWLEDGE_BASE_URL = 'http://www.meta-share.org/knowledgebase/'
# The URL for META-SHARE statistics server.
STATS_SERVER_URL = "http://metastats.fbk.eu/"
# The URL for GeoIP database.
GEOIP_DATA_URL = "http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz"
# If STORAGE_PATH does not exist, try to create it and halt if not
# possible.
try:
if not os.path.isdir(STORAGE_PATH):
os.makedirs(STORAGE_PATH)
if not os.path.isdir(LOCK_DIR):
os.makedirs(LOCK_DIR)
except:
raise OSError, "STORAGE_PATH must exist and be writable!"
# If XDIFF_LOCATION was not set in local_settings, set a default here:
try:
_ = XDIFF_LOCATION
except:
XDIFF_LOCATION = None
# Perform some cleanup operations on the imported local settings.
if DJANGO_URL.strip().endswith('/'):
DJANGO_URL = DJANGO_URL.strip()[:-1]
if not DJANGO_BASE.strip().endswith('/'):
DJANGO_BASE = '{0}/'.format(DJANGO_BASE.strip())
if DJANGO_BASE.strip().startswith('/'):
DJANGO_BASE = DJANGO_BASE.strip()[1:]
# Pagination settings for this django project.
PAGINATION_ITEMS_PER_PAGE = 50
LOGIN_URL = '/{0}login/'.format(DJANGO_BASE)
LOGIN_REDIRECT_URL = reverse_lazy('frontpage') # reverse it to grab the i18n prefix
LOGOUT_URL = '/{0}logout/'.format(DJANGO_BASE)
MANAGERS = ADMINS
SITE_ID = 1
METASHARE_VERSION = '3.1.1'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
STATIC_URL = '/static/'
STATICFILES_DIRS = ( '{0}/static/'.format(ROOT_PATH),)
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django_password_validation.DjangoPasswordValidationMiddleware', # must come before AuthenticationMiddleware
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'metashare.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'{0}/templates'.format(ROOT_PATH),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
"metashare.context_processors.global_settings",
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.messages',
'django.contrib.humanize',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'selectable',
'haystack',
'analytical',
'tastypie',
'metashare.accounts',
'metashare.storage',
# 'metashare.sync',
'metashare.stats',
'metashare.recommendations',
'metashare.repository',
'metashare.bcp47',
# 'metashare.processing',
'progressbarupload',
'metashare.edelivery',
# PROJECT MANAGEMENT
'project_management',
)
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
"progressbarupload.uploadhandler.ProgressBarUploadHandler",
)
# add Kronos to installed apps if not running on Windows
if os.name != 'nt':
INSTALLED_APPS += ('kronos',)
# basic Haystack search backend configuration
TEST_MODE_NAME = 'testing'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': SOLR_URL,
'SILENTLY_FAIL': False
},
TEST_MODE_NAME: {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': TESTING_SOLR_URL,
'SILENTLY_FAIL': False
},
}
# This setting controls what haystack SignalProcessor class is used to handle
# Django's signals and keep the search index up-to-date.
HAYSTACK_SIGNAL_PROCESSOR = 'metashare.repository.signals.PatchedSignalProcessor'
# we use a custom Haystack search backend router so that we can dynamically
# switch between the main/default search backend and the one for testing
HAYSTACK_ROUTERS = [ 'metashare.haystack_routers.MetashareRouter' ]
# a custom test runner with the added value on top of the default Django test
# runner to automatically set up Haystack so that it uses a dedicated search
# backend for testing
TEST_RUNNER = 'metashare.test_runner.MetashareTestRunner'
PYLINT_RCFILE = '{0}/test-config/pylint.rc'.format(ROOT_PATH)
# set display for Selenium tests
if 'DISPLAY' in os.environ:
import re
SELENIUM_DISPLAY = re.sub(r'[^\:]*(\:\d{1,2})(?:\.\d+)?', r'\1',
os.environ['DISPLAY'])
# sitemap url to be used in "robots.txt"
SITEMAP_URL = '{}/sitemap.xml'.format(DJANGO_URL)
# maximum time interval in seconds allowed between two resource views so that
# the resources are still considered as 'viewed together';
# used in recommendations
MAX_VIEW_INTERVAL = 60 * 5
# maximum time interval in seconds allowed between two resource downloads so
# that the resources are still considered as 'downloaded together';
# used in recommendations
MAX_DOWNLOAD_INTERVAL = 60 * 10
# list of synchronization protocols supported by this node
SYNC_PROTOCOLS = (
'1.0',
)
# Full import path of a serializer class to use for serializing session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# The Python path syntax of the setting we are using in META-SHARE
DJANGO_SETTINGS_MODULE = 'metashare.settings'
# A dictionary specifying the package where migration modules can be found
# on a per-app basis.
MIGRATION_MODULES = {
'accounts': 'metashare.accounts.django_migrations',
'repository': 'metashare.repository.django_migrations',
'stats': 'metashare.stats.django_migrations',
'recommendations': 'metashare.recommendations.django_migrations',
'storage': 'metashare.storage.django_migrations',
}
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return "notmigrations"
TESTS_IN_PROGRESS = False
if 'test' in sys.argv[1:]:
logging.disable(logging.CRITICAL)
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TESTS_IN_PROGRESS = True
MIGRATION_MODULES = DisableMigrations()
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
TASTYPIE_ALLOW_MISSING_SLASH = True
TASTYPIE_DEFAULT_FORMATS = ['json']
| 30.349359
| 113
| 0.722463
|
4a0872e96a2c8e5bf88015fcf27a92b7a81822d8
| 7,652
|
py
|
Python
|
jacket/tests/compute/unit/pci/test_devspec.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/tests/compute/unit/pci/test_devspec.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/tests/compute/unit/pci/test_devspec.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | 2
|
2016-08-10T02:21:49.000Z
|
2020-07-24T01:57:21.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute.pci import devspec
from jacket.compute import test
dev = {"vendor_id": "8086",
"product_id": "5057",
"address": "1234:5678:8988.5",
"parent_addr": "0000:0a:00.0"}
class PciAddressTestCase(test.NoDBTestCase):
def test_wrong_address(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.6",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_address_too_big(self):
pci_info = {"address": "0000:0a:0b:00.5",
"physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciDeviceInvalidAddressField,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI Whitelist: '
'The PCI address 0000:0a:00.%s has an invalid function.'
% (devspec.MAX_FUNC + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid domain %x'
% (devspec.MAX_DOMAIN + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid bus %x'
% (devspec.MAX_BUS + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid slot %x'
% (devspec.MAX_SLOT + 1))
self.assertEqual(msg, six.text_type(exc))
def test_address_is_undefined(self):
pci_info = {"vendor_id": "8086", "product_id": "5057"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_partial_address(self):
pci_info = {"address": ":0a:00.", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
dev = {"vendor_id": "1137",
"product_id": "0071",
"address": "0000:0a:00.5",
"parent_addr": "0000:0a:00.0"}
self.assertTrue(pci.match(dev))
@mock.patch('compute.pci.utils.is_physical_function', return_value = True)
def test_address_is_pf(self, mock_is_physical_function):
pci_info = {"address": "0000:0a:00.0", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
class PciDevSpecTestCase(test.NoDBTestCase):
def test_spec_match(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_invalid_vendor_id(self):
pci_info = {"vendor_id": "8087", "address": "*: *: *.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
"invalid vendor_id 80860", six.text_type(exc))
def test_invalid_product_id(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.5",
"product_id": "5056", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
"invalid product_id 50570", six.text_type(exc))
def test_devname_and_address(self):
pci_info = {"devname": "eth0", "vendor_id": "8086",
"address": "*:*:*.5", "physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceInvalidDeviceName,
devspec.PciDeviceSpec, pci_info)
@mock.patch('compute.pci.utils.get_function_by_ifname',
return_value = ("0000:0a:00.0", True))
def test_by_name(self, mock_get_function_by_ifname):
pci_info = {"devname": "eth0", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
@mock.patch('compute.pci.utils.get_function_by_ifname',
return_value = (None, False))
def test_invalid_name(self, mock_get_function_by_ifname):
pci_info = {"devname": "lo", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciDeviceNotFoundById,
devspec.PciDeviceSpec, pci_info)
self.assertEqual('PCI device lo not found', six.text_type(exc))
def test_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
pci_dev = {
'compute_node_id': 1,
'address': '0000:00:00.5',
'product_id': '5057',
'vendor_id': '8086',
'status': 'available',
'parent_addr': None,
'extra_k1': 'v1',
}
pci_obj = compute.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
| 42.988764
| 78
| 0.613304
|
4a087429aeae44d10a7390e200208c96aa36aae6
| 4,233
|
py
|
Python
|
LS7366R.py
|
PhakineeS/LS7366R
|
da4b1288d4243785dd92dd632db2e1cdc8c18252
|
[
"MIT"
] | null | null | null |
LS7366R.py
|
PhakineeS/LS7366R
|
da4b1288d4243785dd92dd632db2e1cdc8c18252
|
[
"MIT"
] | null | null | null |
LS7366R.py
|
PhakineeS/LS7366R
|
da4b1288d4243785dd92dd632db2e1cdc8c18252
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import spidev
from time import sleep
class LS7366R():
#commands
CLEAR_COUNTER = 0x20 # 0X 0010 0000
CLEAR_STATUS = 0x30 # 0X 0011 0000
READ_COUNTER = 0x60 # 0X 0110 0000
READ_STATUS = 0x70 # 0X 0111 0000
WRITE_MODE0 = 0x88 # 0X 1000 1000
WRITE_MODE1 = 0x90 # 0X 1001 0000
#mode
FOURX_COUNT = 0x03 # 0X 0000 0011
FOURBYTE_COUNTER = 0x00 # 0X 0000 0000
THREEBYTE_COUNTER = 0x01 # 0X 0000 0001
TWOBYTE_COUNTER = 0x02 # 0X 0000 0010
ONEBYTE_COUNTER = 0x03 # 0X 0000 0011
BYTE_MODE = [ ONEBYTE_COUNTER , TWOBYTE_COUNTER , THREEBYTE_COUNTER , FOURBYTE_COUNTER ]
MAX_VAL = 4294967295 # max value in integer
COUNTER_SIZE = 4 # default value
def __init__(self,CSX,SCK,BTMD):
self.COUNTER_SIZE = BTMD # declare counter size ( default is 4 )
self.spi = spidev.SpiDev() # initialize object
self.spi = spi.open(0,CSX) # connect the object to SPI device
self.spi.max_speed_hz = SCK # set speed
print("Clearing Encoder {} Count : {}".format(CSX,self.clear_counter())) # clear counter
print("Clearing Encode {} STATUS : {}".format(CSX,self.clear_status())) # clear status
self.spi.xfer2([self.WRITE_MODE0,self.FOURX_COUNT]) # send write command and mode0 value
sleep(0.1) # slow down
self.spi.xfer2([self.WRITE_MODE1,self.BYTE_MODE[self.COUNTER_SIZE-1]]) # send write command and mode1 value ( according to byte)
def close(self):
print("closing...")
self.spi.close() # disconnect the object from SPI device
def clear_counter(self):
self.spi.xfer2([self.CLEAR_COUNTER]) # send clear command
return ["DONE"]
def clear_status(self):
self.spi.xfer2([self.CLEAR_STATUS]) # send clear command
return ["DONE"]
def read_counter(self):
readTransaction = [self.READ_COUNTER] # add read command in list
for i in range(self.COUNTER_SIZE):
readTransaction.append(0) # add random data according to counter size (byte)
data = self.spi.xfer2(readTransaction) # send command and random data
#read data
EncoderCount = 0
for i in range(self.COUNTER_SIZE):
EncoderCount = (EncoderCount << 8) + data[i+1] # shift last encoder value to the left ( 8 bits ) and add new value on the right side ( position of new value plus one because first position is read command not value )
# check overflow
if data[1] != 255: # true if not overflow
return EncoderCount # not overflow
return EncoderCount + (self.MAX_VAL+1) # overflow
def read_status(self):
data = self.spi.xfer2([READ_STATUS, 0xFF]) # send read command and random data
return data[1] # return position one because position zero is random data
if __name__ == "__main__": # true if the python interpreter is running the source file as the main program
encoder = LS7366R(0, 1000000, 4) # build class
try:
while True:
print("Encoder count :",encoder.read_counter(),"Press CTRL-C to terminate test program.") # keep printing count
sleep(.2) # slown down
except KeyboardInterrupt: # true if press ctrl+c
encoder.close() # disconnect
print("ending..")
| 56.44
| 235
| 0.512403
|
4a0875bf86064b29cfd42163236cf9716f41914e
| 2,931
|
py
|
Python
|
src/model.py
|
kirbiyik/caption-
|
e6e4b6e28bb3553efa6892f9deb933fc9d797374
|
[
"MIT"
] | 10
|
2019-10-30T12:30:41.000Z
|
2021-09-29T09:38:10.000Z
|
src/model.py
|
kirbiyik/caption-
|
e6e4b6e28bb3553efa6892f9deb933fc9d797374
|
[
"MIT"
] | null | null | null |
src/model.py
|
kirbiyik/caption-
|
e6e4b6e28bb3553efa6892f9deb933fc9d797374
|
[
"MIT"
] | 3
|
2020-10-02T04:19:52.000Z
|
2021-04-04T13:47:03.000Z
|
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
class CaptioningModel(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers,
max_seq_length=20):
"""Load the pretrained ResNet-152 and replace top fc layer."""
# Decoder
super(CaptioningModel, self).__init__()
resnet = models.resnet18(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.linear1 = nn.Linear(resnet.fc.in_features, embed_size)
self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
# Encoder
self.embed = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.linear2 = nn.Linear(hidden_size, vocab_size)
self.max_seg_length = max_seq_length
def forward(self, images, captions, caption_lengths):
"""Extract feature vectors from input images."""
# Encoder forward
# Disable autograd mechanism to speed up since we use pretrained model
with torch.no_grad():
features = self.resnet(images)
features = features.reshape(features.size(0), -1)
features = self.bn(self.linear1(features))
# Decoder forward
embeddings = self.embed(captions)
# Give image features before caption in time series, not as hidden state
embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
packed = pack_padded_sequence(embeddings, caption_lengths, batch_first=True)
hiddens, _ = self.lstm(packed)
outputs = self.linear2(hiddens[0])
return outputs
def sample(self, images, states=None):
"""Generate captions for given image features using greedy search."""
# Extract features
with torch.no_grad():
features = self.resnet(images)
features = features.reshape(features.size(0), -1)
features = self.bn(self.linear1(features))
# Decode
sampled_ids = []
inputs = features.unsqueeze(1)
for i in range(self.max_seg_length):
hiddens, states = self.lstm(inputs, states) # hiddens: (batch_size, 1, hidden_size)
outputs = self.linear2(hiddens.squeeze(1)) # outputs: (batch_size, vocab_size)
_, predicted = outputs.max(1) # predicted: (batch_size)
sampled_ids.append(predicted)
inputs = self.embed(predicted) # inputs: (batch_size, embed_size)
inputs = inputs.unsqueeze(1) # inputs: (batch_size, 1, embed_size)
sampled_ids = torch.stack(sampled_ids, 1) # sampled_ids: (batch_size, max_seq_length)
return sampled_ids
| 48.85
| 108
| 0.630843
|
4a0877213c9bfd557c63f2f526d2e5472958ac82
| 8,691
|
py
|
Python
|
Bio/NaiveBayes.py
|
adamnovak/biopython
|
92772dd6add33e0b87ab593841f924f0f6f16090
|
[
"PostgreSQL"
] | 5
|
2016-03-09T03:41:23.000Z
|
2022-01-24T12:34:44.000Z
|
Bio/NaiveBayes.py
|
adamnovak/biopython
|
92772dd6add33e0b87ab593841f924f0f6f16090
|
[
"PostgreSQL"
] | null | null | null |
Bio/NaiveBayes.py
|
adamnovak/biopython
|
92772dd6add33e0b87ab593841f924f0f6f16090
|
[
"PostgreSQL"
] | 6
|
2016-10-24T11:27:13.000Z
|
2020-02-26T16:35:01.000Z
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This provides code for a general Naive Bayes learner.
Naive Bayes is a supervised classification algorithm that uses Bayes
rule to compute the fit between a new observation and some previously
observed data. The observations are discrete feature vectors, with
the Bayes assumption that the features are independent. Although this
is hardly ever true, the classifier works well enough in practice.
Glossary:
observation A feature vector of discrete data.
class A possible classification for an observation.
Classes:
NaiveBayes Holds information for a naive Bayes classifier.
Functions:
train Train a new naive Bayes classifier.
calculate Calculate the probabilities of each class, given an observation.
classify Classify an observation into a class.
"""
from __future__ import print_function
import numpy
def _contents(items):
term = 1.0/len(items)
counts = {}
for item in items:
counts[item] = counts.get(item, 0) + term
return counts
class NaiveBayes(object):
"""Holds information for a NaiveBayes classifier.
Members:
classes List of the possible classes of data.
p_conditional CLASS x DIM array of dicts of value -> P(value|class,dim)
p_prior List of the prior probabilities for every class.
dimensionality Dimensionality of the data.
"""
def __init__(self):
self.classes = []
self.p_conditional = None
self.p_prior = []
self.dimensionality = None
def calculate(nb, observation, scale=0):
"""calculate(nb, observation[, scale]) -> probability dict
Calculate log P(class|observation) for each class. nb is a NaiveBayes
classifier that has been trained. observation is a list representing
the observed data. scale is whether the probability should be
scaled by P(observation). By default, no scaling is done. The return
value is a dictionary where the keys is the class and the value is the
log probability of the class.
"""
# P(class|observation) = P(observation|class)*P(class)/P(observation)
# Taking the log:
# lP(class|observation) = lP(observation|class)+lP(class)-lP(observation)
# Make sure the observation has the right dimensionality.
if len(observation) != nb.dimensionality:
raise ValueError("observation in %d dimension, but classifier in %d"
% (len(observation), nb.dimensionality))
# Calculate log P(observation|class) for every class.
n = len(nb.classes)
lp_observation_class = numpy.zeros(n) # array of log P(observation|class)
for i in range(n):
# log P(observation|class) = SUM_i log P(observation_i|class)
probs = [None] * len(observation)
for j in range(len(observation)):
probs[j] = nb.p_conditional[i][j].get(observation[j], 0)
lprobs = numpy.log(numpy.clip(probs, 1.e-300, 1.e+300))
lp_observation_class[i] = sum(lprobs)
# Calculate log P(class).
lp_prior = numpy.log(nb.p_prior)
# Calculate log P(observation).
lp_observation = 0.0 # P(observation)
if scale: # Only calculate this if requested.
# log P(observation) = log SUM_i P(observation|class_i)P(class_i)
obs = numpy.exp(numpy.clip(lp_prior+lp_observation_class, -700, +700))
lp_observation = numpy.log(sum(obs))
# Calculate log P(class|observation).
lp_class_observation = {} # Dict of class : log P(class|observation)
for i in range(len(nb.classes)):
lp_class_observation[nb.classes[i]] = \
lp_observation_class[i] + lp_prior[i] - lp_observation
return lp_class_observation
def classify(nb, observation):
"""classify(nb, observation) -> class
Classify an observation into a class.
"""
# The class is the one with the highest probability.
probs = calculate(nb, observation, scale=0)
max_prob = max_class = None
for klass in nb.classes:
if max_prob is None or probs[klass] > max_prob:
max_prob, max_class = probs[klass], klass
return max_class
def train(training_set, results, priors=None, typecode=None):
"""train(training_set, results[, priors]) -> NaiveBayes
Train a naive bayes classifier on a training set. training_set is a
list of observations. results is a list of the class assignments
for each observation. Thus, training_set and results must be the same
length. priors is an optional dictionary specifying the prior
probabilities for each type of result. If not specified, the priors
will be estimated from the training results.
"""
if not len(training_set):
raise ValueError("No data in the training set.")
if len(training_set) != len(results):
raise ValueError("training_set and results should be parallel lists.")
# If no typecode is specified, try to pick a reasonable one. If
# training_set is a Numeric array, then use that typecode.
# Otherwise, choose a reasonable default.
# XXX NOT IMPLEMENTED
# Check to make sure each vector in the training set has the same
# dimensionality.
dimensions = [len(x) for x in training_set]
if min(dimensions) != max(dimensions):
raise ValueError("observations have different dimensionality")
nb = NaiveBayes()
nb.dimensionality = dimensions[0]
# Get a list of all the classes, and
# estimate the prior probabilities for the classes.
if priors is not None:
percs = priors
nb.classes = list(set(results))
else:
class_freq = _contents(results)
nb.classes = list(class_freq.keys())
percs = class_freq
nb.classes.sort() # keep it tidy
nb.p_prior = numpy.zeros(len(nb.classes))
for i in range(len(nb.classes)):
nb.p_prior[i] = percs[nb.classes[i]]
# Collect all the observations in class. For each class, make a
# matrix of training instances versus dimensions. I might be able
# to optimize this with Numeric, if the training_set parameter
# were guaranteed to be a matrix. However, this may not be the
# case, because the client may be hacking up a sparse matrix or
# something.
c2i = {} # class to index of class
for index, key in enumerate(nb.classes):
c2i[key] = index
observations = [[] for c in nb.classes] # separate observations by class
for i in range(len(results)):
klass, obs = results[i], training_set[i]
observations[c2i[klass]].append(obs)
# Now make the observations Numeric matrics.
for i in range(len(observations)):
# XXX typecode must be specified!
observations[i] = numpy.asarray(observations[i], typecode)
# Calculate P(value|class,dim) for every class.
# This is a good loop to optimize.
nb.p_conditional = []
for i in range(len(nb.classes)):
class_observations = observations[i] # observations for this class
nb.p_conditional.append([None] * nb.dimensionality)
for j in range(nb.dimensionality):
# Collect all the values in this dimension.
values = class_observations[:, j]
# Add pseudocounts here. This needs to be parameterized.
#values = list(values) + range(len(nb.classes)) # XXX add 1
# Estimate P(value|class,dim)
nb.p_conditional[i][j] = _contents(values)
return nb
if __name__ == "__main__":
# Car data from example 'Naive Bayes Classifier example' by Eric Meisner November 22, 2003
# http://www.inf.u-szeged.hu/~ormandi/teaching/mi2/02-naiveBayes-example.pdf
xcar=[
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Domestic'],
['Red', 'SUV', 'Imported'],
['Red', 'Sports', 'Imported']
]
ycar=[
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'No',
'Yes'
]
carmodel = train(xcar, ycar)
carresult = classify(carmodel, ['Red', 'Sports', 'Domestic'])
print('Is Yes? %s' % carresult)
carresult = classify(carmodel, ['Red', 'SUV', 'Domestic'])
print('Is No? %s' % carresult)
| 36.516807
| 94
| 0.6547
|
4a087950392d67afee8d36908ae51aad71cae423
| 3,314
|
py
|
Python
|
velbusaio/messages/cover_down.py
|
brefra/velbus-aio
|
daf7f3f331bd3bea9ad51437d5aaa66d649378e0
|
[
"Apache-2.0"
] | null | null | null |
velbusaio/messages/cover_down.py
|
brefra/velbus-aio
|
daf7f3f331bd3bea9ad51437d5aaa66d649378e0
|
[
"Apache-2.0"
] | null | null | null |
velbusaio/messages/cover_down.py
|
brefra/velbus-aio
|
daf7f3f331bd3bea9ad51437d5aaa66d649378e0
|
[
"Apache-2.0"
] | null | null | null |
"""
:author: Tom Dupré <gitd8400@gmail.com>
"""
from __future__ import annotations
import json
import logging
import struct
from velbusaio.command_registry import register_command
from velbusaio.message import Message
# Velbus command byte identifying a "cover down" message on the bus.
COMMAND_CODE = 0x06
class CoverDownMessage(Message):
    """
    sent by:
    received by: VMB2BLE
    """

    def __init__(self, address=None):
        Message.__init__(self)
        # Channel 0 / zero delay until populate() or a caller fills them in.
        self.channel = 0
        self.delay_time = 0
        self.set_defaults(address)

    def populate(self, priority, address, rtr, data):
        """
        :return: None
        """
        # Validate the frame shape before decoding it.
        self.needs_high_priority(priority)
        self.needs_no_rtr(rtr)
        self.needs_data(data, 4)
        self.set_attributes(priority, address, rtr)
        self.channel = self.byte_to_channel(data[0])
        # Delay is a 24-bit big-endian value; left-pad to 32 bits for ">L".
        self.delay_time = struct.unpack(">L", b"\x00" + data[1:])[0]

    def to_json(self):
        """
        :return: str
        """
        payload = self.to_json_basic()
        payload["channel"] = self.channel
        payload["delay_time"] = self.delay_time
        return json.dumps(payload)

    def set_defaults(self, address):
        # Always high priority / no RTR; the address is optional.
        if address is not None:
            self.set_address(address)
        self.set_high_priority()
        self.set_no_rtr()

    def data_to_binary(self):
        """
        :return: bytes
        """
        header = bytes([COMMAND_CODE, self.channels_to_byte([self.channel])])
        # Keep only the low three bytes: the wire format uses a 24-bit delay.
        return header + struct.pack(">L", self.delay_time)[-3:]
class CoverDownMessage2(Message):
    """
    sent by:
    received by: VMB1BL VMB2BL
    """

    def __init__(self, address=None):
        Message.__init__(self)
        self.channel = 0
        self.delay_time = 0
        self.logger = logging.getLogger("velbus")
        self.set_defaults(address)

    def populate(self, priority, address, rtr, data):
        """
        :return: None
        """
        self.needs_high_priority(priority)
        self.needs_no_rtr(rtr)
        self.needs_data(data, 4)
        self.set_attributes(priority, address, rtr)
        # Old VMB1BL/VMB2BL modules encode the channel as a 2-bit mask:
        # 00000011 = channel 1, 00001100 = channel 2.
        # Shift right once and mask with 0x03 to recover the channel bit.
        channel_bits = (data[0] >> 1) & 0x03
        self.channel = self.byte_to_channel(channel_bits)
        # 24-bit big-endian delay, zero-padded to 32 bits for ">L".
        self.delay_time = struct.unpack(">L", b"\x00" + data[1:])[0]

    def to_json(self):
        """
        :return: str
        """
        payload = self.to_json_basic()
        payload["channel"] = self.channel
        payload["delay_time"] = self.delay_time
        return json.dumps(payload)

    def set_defaults(self, address):
        if address is not None:
            self.set_address(address)
        self.set_high_priority()
        self.set_no_rtr()

    def data_to_binary(self):
        """
        :return: bytes
        """
        # Re-encode the channel as the 2-bit mask expected on the wire.
        mask = 0x03 if self.channel == 0x01 else 0x0C
        return bytes([COMMAND_CODE, mask]) + struct.pack(">L", self.delay_time)[-3:]
# Route incoming frames with COMMAND_CODE to the right decoder per module:
# the newer *BLE/*BLS blind modules use CoverDownMessage, while the classic
# VMB1BL/VMB2BL use the bitmask-channel variant CoverDownMessage2.
register_command(COMMAND_CODE, CoverDownMessage, "VMB1BLE")
register_command(COMMAND_CODE, CoverDownMessage, "VMB2BLE")
register_command(COMMAND_CODE, CoverDownMessage, "VMB1BLS")
register_command(COMMAND_CODE, CoverDownMessage2, "VMB1BL")
register_command(COMMAND_CODE, CoverDownMessage2, "VMB2BL")
| 26.725806
| 83
| 0.604406
|
4a0879650c0664f304f32575c6e7b866db2e7941
| 5,999
|
py
|
Python
|
test/features/steps/transformer_steps.py
|
mapleknight/molecular-data-provider
|
111418ed7efab3c393c22116854fd322ff367fb9
|
[
"MIT"
] | 5
|
2020-08-28T09:30:16.000Z
|
2021-12-29T16:00:26.000Z
|
test/features/steps/transformer_steps.py
|
mapleknight/molecular-data-provider
|
111418ed7efab3c393c22116854fd322ff367fb9
|
[
"MIT"
] | 19
|
2021-04-26T22:19:16.000Z
|
2022-03-09T21:09:55.000Z
|
test/features/steps/transformer_steps.py
|
mapleknight/molecular-data-provider
|
111418ed7efab3c393c22116854fd322ff367fb9
|
[
"MIT"
] | 8
|
2020-07-09T18:50:57.000Z
|
2022-01-20T16:01:33.000Z
|
from behave import given, when, then
from contextlib import closing
import requests
import jsonpath_rw
import json
@given('a transformer at "{url}"')
def step_impl(context, url):
    """Record the transformer's base URL on the scenario context."""
    context.transformer_url = url
@given('the transformer')
def step_impl(context):
    """Target the previously recorded transformer URL and reset scenario state."""
    context.base_url = context.transformer_url
    context.gene_list_id = None
@when('we fire "{query}" query')
def step_impl(context, query):
"""
Fire a knowledge-source query
"""
url = context.base_url+query
print('url:',url,'\n')
with closing(requests.get(url)) as response:
context.response = response
context.response_json = response.json()
@when('we fire "{query}" query with the following body')
def step_impl(context, query):
"""
Fire a knowledge-source query
"""
url = context.base_url+query
print('url:',url,'\n')
with closing(requests.post(url, json=json.loads(context.text))) as response:
context.response = response
context.response_json = response.json()
@then('the value of "{path}" should be "{value}"')
def step_impl(context, path, value):
"""
This step checks value specified by the path
"""
json_path_expr = jsonpath_rw.parse(path)
result = json_path_expr.find(context.response_json)
print(result)
assert result[0].value == value
@then('the int value of "{path}" should be {value}')
def step_impl(context, path, value):
"""
This step checks value specified by the path
"""
json_path_expr = jsonpath_rw.parse(path)
result = json_path_expr.find(context.response_json)
print(result)
assert result[0].value == int(value)
@then('the size of "{path}" should be {size}')
def step_impl(context, path, size):
"""
This step checks size specified by the path
"""
json_path_expr = jsonpath_rw.parse(path)
result = json_path_expr.find(context.response_json)
print(result)
print("len = ",len(result[0].value))
assert len(result[0].value) == int(size)
@then('the response contains the following entries in "{key}" of "{parent}"')
def step_impl(context, key, parent):
    """Every table value must occur among the entry[parent][key] values."""
    collected = set()
    print('Collected entries:')
    for entry in context.response_json:
        value = entry[parent][key]
        print(' ', value)
        collected.add(value)
    print('Tested entries:')
    for row in context.table:
        print(' ', row[key])
        assert row[key] in collected
@then('the response contains the following entries in "{key}" of "{parent}" array')
def step_impl(context, key, parent):
    """Every table value must occur in some element of the entry[parent] arrays."""
    collected = set()
    print('Collected entries:')
    for entry in context.response_json:
        for element in entry[parent]:
            if key in element:
                value = element[key]
                # Print each distinct value only the first time it is seen.
                if value not in collected:
                    print(' ', value)
                    collected.add(value)
    print('Tested entries:')
    for row in context.table:
        print(' ', row[key])
        assert row[key] in collected
@then('the response contains the following entries in "{key}"')
def step_impl(context, key):
    """Every table value must occur among the entry[key] values."""
    collected = set()
    print('Collected entries:')
    for entry in context.response_json:
        value = entry[key]
        print(' ', value)
        collected.add(value)
    print('Tested entries:')
    for row in context.table:
        print(' ', row[key])
        assert row[key] in collected
@then('the response contains "{value}" in "{key}"')
def step_impl(context, key, value):
"""
This step checks whether all values specified in the test are contained in the response
"""
entries = set()
print('Collected entries:')
for entry in context.response_json:
print(' ', entry[key])
entries.add(entry[key])
print('Tested entry:')
print(' ', value)
assert value in entries
@then('the response only contains the following entries in "{key}" of "{parent}"')
def step_impl(context, key, parent):
    """Every entry[parent][key] value in the response must appear in the table."""
    allowed = set()
    print('Collected entries:')
    for row in context.table:
        print(' ', row[key])
        allowed.add(row[key])
    print('Tested entries:')
    for entry in context.response_json:
        value = entry[parent][key]
        print(' ', value)
        assert value in allowed
@then('the response only contains the following entries in "{key}" of "{parent}" array')
def step_impl(context, key, parent):
    """Every element[key] value in the entry[parent] arrays must appear in the table."""
    allowed = set()
    print('Collected entries:')
    for row in context.table:
        print(' ', row[key])
        allowed.add(row[key])
    print('Tested entries:')
    for entry in context.response_json:
        for element in entry[parent]:
            print(' ', element[key])
            assert element[key] in allowed
@then('the response only contains the following entries in "{key}"')
def step_impl(context, key):
    """Every entry[key] value in the response must appear in the table."""
    allowed = set()
    print('Collected entries:')
    for row in context.table:
        print(' ', row[key])
        allowed.add(row[key])
    print('Tested entries:')
    for entry in context.response_json:
        print(' ', entry[key])
        assert entry[key] in allowed
@then('the size of the response is {size}')
def step_impl(context, size):
    """Assert the top-level JSON array has exactly *size* elements."""
    actual = len(context.response_json)
    print("len=", actual)
    assert actual == int(size)
| 29.263415
| 93
| 0.641607
|
4a087988b6188cd0959fbf92279bd79cce21a1a5
| 3,463
|
py
|
Python
|
homeassistant/components/homewizard/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/homewizard/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/homewizard/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The Homewizard integration."""
import logging
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_IP_ADDRESS
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import entity_registry as er
from .const import DOMAIN, PLATFORMS
from .coordinator import HWEnergyDeviceUpdateCoordinator as Coordinator
# Module-level logger shared by the setup/unload handlers below.
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Homewizard from a config entry.

    Performs a one-time migration from the legacy ``homewizard_energy``
    custom component (re-creating its entities under this integration),
    then creates the update coordinator and forwards setup to the
    entity platforms.  Raises ConfigEntryNotReady if the first refresh fails.
    """
    _LOGGER.debug("__init__ async_setup_entry")
    # Migrate `homewizard_energy` (custom_component) to `homewizard`
    if entry.source == SOURCE_IMPORT and "old_config_entry_id" in entry.data:
        # Remove the old config entry ID from the entry data so we don't try this again
        # on the next setup
        data = entry.data.copy()
        old_config_entry_id = data.pop("old_config_entry_id")
        hass.config_entries.async_update_entry(entry, data=data)
        _LOGGER.debug(
            (
                "Setting up imported homewizard_energy entry %s for the first time as "
                "homewizard entry %s"
            ),
            old_config_entry_id,
            entry.entry_id,
        )
        ent_reg = er.async_get(hass)
        for entity in er.async_entries_for_config_entry(ent_reg, old_config_entry_id):
            _LOGGER.debug("Removing %s", entity.entity_id)
            ent_reg.async_remove(entity.entity_id)
            _LOGGER.debug("Re-creating %s for the new config entry", entity.entity_id)
            # We will precreate the entity so that any customizations can be preserved
            new_entity = ent_reg.async_get_or_create(
                entity.domain,
                DOMAIN,
                entity.unique_id,
                # Keep the object id (the part after the ".") of the old entity
                suggested_object_id=entity.entity_id.split(".")[1],
                disabled_by=entity.disabled_by,
                config_entry=entry,
                original_name=entity.original_name,
                original_icon=entity.original_icon,
            )
            _LOGGER.debug("Re-created %s", new_entity.entity_id)
            # If there are customizations on the old entity, apply them to the new one
            if entity.name or entity.icon:
                ent_reg.async_update_entity(
                    new_entity.entity_id, name=entity.name, icon=entity.icon
                )
        # Remove the old config entry and now the entry is fully migrated
        hass.async_create_task(hass.config_entries.async_remove(old_config_entry_id))
    # Create coordinator
    coordinator = Coordinator(hass, entry.data[CONF_IP_ADDRESS])
    try:
        await coordinator.async_config_entry_first_refresh()
    except ConfigEntryNotReady:
        # Close the API client before re-raising so the connection is not leaked.
        await coordinator.api.close()
        raise
    # Finalize
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a Homewizard config entry and release its API client."""
    _LOGGER.debug("__init__ async_unload_entry")
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unloaded:
        # Drop the coordinator from hass.data and close its API connection.
        coordinator = hass.data[DOMAIN].pop(entry.entry_id)
        await coordinator.api.close()
    return unloaded
| 36.840426
| 87
| 0.678891
|
4a087a07f22e8a922faab587114b44115c320d69
| 1,468
|
py
|
Python
|
cognitive/settings/local.py
|
parksurk/sk-watson-hackerthon
|
3677c7f068634809199bfbf06826c153f8b36eb9
|
[
"Apache-2.0"
] | 1
|
2018-12-01T08:46:43.000Z
|
2018-12-01T08:46:43.000Z
|
cognitive/settings/local.py
|
parksurk/sk-watson-hackerthon
|
3677c7f068634809199bfbf06826c153f8b36eb9
|
[
"Apache-2.0"
] | null | null | null |
cognitive/settings/local.py
|
parksurk/sk-watson-hackerthon
|
3677c7f068634809199bfbf06826c153f8b36eb9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import all settings from base.py; values defined below override them.
from .base import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Local development uses a file-backed SQLite database in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Log INFO and above from the root logger to the console, one line per record
# with a short timestamp prefix.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'ourFormatter': {
            'format': '%(asctime)s:%(name)s:%(levelname)s:%(message)s',
            'datefmt': '%m/%d/%Y %I:%M:%S',
        },
    },
    'handlers': {
        'theConsole': {
            'class': 'logging.StreamHandler',
            'formatter': 'ourFormatter',
        },
    },
    'root': {
        'level': 'INFO',
        'handlers': ['theConsole'],
    },
}
| 28.784314
| 74
| 0.626022
|
4a087a38feef7bc94f59b6f6ba5b3501cb5bb4ec
| 21,427
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_cross_connection_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_cross_connection_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_cross_connection_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
    T = TypeVar('T')
    # Optional per-call hook: when passed as the `cls` kwarg it receives the
    # pipeline response, the deserialized body and response headers, and its
    # return value replaces the operation's normal result.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations(object):
"""ExpressRouteCrossConnectionPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer/deserializer translate between models and wire payloads.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        cross_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.ExpressRouteCrossConnectionPeeringList"]
        """Gets all peerings in a specified ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeeringList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCrossConnectionPeeringList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Build the GET request for the first page, or re-use the service-supplied
        # next_link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is already a fully-qualified URL including query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page and return (next_link, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping any non-200 status to the right exception.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        cross_connection_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Issues the single DELETE request that starts the long-running delete;
        # polling for completion is handled by begin_delete().
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all valid first responses for an async ARM delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        cross_connection_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified peering from the ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        cross_connection_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ExpressRouteCrossConnectionPeering"
        """Gets the specified peering for the ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        cross_connection_name,  # type: str
        peering_name,  # type: str
        peering_parameters,  # type: "models.ExpressRouteCrossConnectionPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ExpressRouteCrossConnectionPeering"
        # Issues the single PUT request that starts the long-running
        # create-or-update; polling is handled by begin_create_or_update().
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing peering, 201 = created new one; same body shape.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    cross_connection_name,  # type: str
    peering_name,  # type: str
    peering_parameters,  # type: "models.ExpressRouteCrossConnectionPeering"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.ExpressRouteCrossConnectionPeering"]
    """Creates or updates a peering in the specified ExpressRouteCrossConnection.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param cross_connection_name: The name of the ExpressRouteCrossConnection.
    :type cross_connection_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param peering_parameters: Parameters supplied to the create or update
     ExpressRouteCrossConnection peering operation.
    :type peering_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT. The identity lambda for
        # ``cls`` keeps the raw pipeline response so deserialization is
        # deferred to the poller below.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            cross_connection_name=cross_connection_name,
            peering_name=peering_name,
            peering_parameters=peering_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not reach
    # the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response of the long-running operation and
        # apply the caller-supplied ``cls`` hook, if any.
        deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new request.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
| 50.298122
| 236
| 0.675036
|
4a087a784ade12e736e81932583b8d7ccf540062
| 24,164
|
py
|
Python
|
fairseq/sequence_generator.py
|
wangqiangneu/dlcl
|
c632ee2bc22a007541243d8d6d2820697c62111b
|
[
"BSD-3-Clause"
] | 115
|
2019-05-26T01:59:51.000Z
|
2022-03-30T08:02:19.000Z
|
fairseq/sequence_generator.py
|
lisa-sicuan/dlcl
|
c632ee2bc22a007541243d8d6d2820697c62111b
|
[
"BSD-3-Clause"
] | 6
|
2019-05-28T01:50:30.000Z
|
2022-03-31T11:09:30.000Z
|
fairseq/sequence_generator.py
|
lisa-sicuan/dlcl
|
c632ee2bc22a007541243d8d6d2820697c62111b
|
[
"BSD-3-Clause"
] | 28
|
2019-05-27T08:44:45.000Z
|
2022-01-25T04:26:57.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
from fairseq import utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
    self, models, tgt_dict, beam_size=1, minlen=1, maxlen=None, stop_early=True,
    normalize_scores=True, len_penalty=1, unk_penalty=0, retain_dropout=False,
    sampling=False, sampling_topk=-1, sampling_temperature=1,
):
    """Generates translations of a given source sentence.

    Args:
        models: ensemble of models whose predictions are averaged during decoding
        tgt_dict: target dictionary supplying the pad/unk/eos symbol ids
        beam_size: beam width used during search
        min/maxlen: The length of the generated output will be bounded by
            minlen and maxlen (not including the end-of-sentence marker).
        stop_early: Stop generation immediately after we finalize beam_size
            hypotheses, even though longer hypotheses might have better
            normalized scores.
        normalize_scores: Normalize scores by the length of the output.
        len_penalty: exponent applied to output length when normalizing scores
        unk_penalty: score subtracted from the <unk> symbol at every step
        retain_dropout: if True, models are NOT switched to eval mode
        sampling: sample hypotheses instead of beam search
        sampling_topk: if > 0, restrict sampling to the top-k candidates
        sampling_temperature: temperature used to flatten/sharpen the
            sampling distribution
    """
    self.models = models
    self.pad = tgt_dict.pad()
    self.unk = tgt_dict.unk()
    self.eos = tgt_dict.eos()
    self.vocab_size = len(tgt_dict)
    self.beam_size = beam_size
    self.minlen = minlen
    # The effective maxlen can never exceed what the weakest ensemble
    # member supports.
    max_decoder_len = min(m.max_decoder_positions() for m in self.models)
    max_decoder_len -= 1  # we define maxlen not including the EOS marker
    self.maxlen = max_decoder_len if maxlen is None else min(maxlen, max_decoder_len)
    self.stop_early = stop_early
    self.normalize_scores = normalize_scores
    self.len_penalty = len_penalty
    self.unk_penalty = unk_penalty
    self.retain_dropout = retain_dropout
    self.sampling = sampling
    self.sampling_topk = sampling_topk
    self.sampling_temperature = sampling_temperature
def cuda(self):
    """Move every ensemble model onto the GPU and return self for chaining."""
    for member in self.models:
        member.cuda()
    return self
def generate_batched_itr(
    self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None,
    cuda=False, timer=None, prefix_size=0,
):
    """Iterate over a batched dataset and yield individual translations.

    Args:
        maxlen_a/b: generate sequences of maximum length ax + b,
            where x is the source sentence length.
        cuda: use GPU for generation
        timer: StopwatchMeter for timing generations.

    Yields:
        (sample id, stripped source tokens, stripped reference tokens or
        None, list of hypotheses) per sentence in each batch.
    """
    if maxlen_b is None:
        maxlen_b = self.maxlen
    for sample in data_itr:
        s = utils.move_to_cuda(sample) if cuda else sample
        if 'net_input' not in s:
            # skip dummy/padding-only batches that carry no model input
            continue
        # NOTE(review): `input` and `id` below shadow Python builtins;
        # preserved as-is since this is a documentation-only pass.
        input = s['net_input']
        srclen = input['src_tokens'].size(1)
        if timer is not None:
            timer.start()
        with torch.no_grad():
            hypos = self.generate(
                input['src_tokens'],
                input['src_lengths'],
                beam_size=beam_size,
                maxlen=int(maxlen_a*srclen + maxlen_b),
                prefix_tokens=s['target'][:, :prefix_size] if prefix_size > 0 else None,
            )
        if timer is not None:
            # charge the timer with the number of tokens in each best hypothesis
            timer.stop(sum(len(h[0]['tokens']) for h in hypos))
        for i, id in enumerate(s['id'].data):
            # remove padding
            src = utils.strip_pad(input['src_tokens'].data[i, :], self.pad)
            ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
            yield id, src, ref, hypos[i]
def generate(self, src_tokens, src_lengths, beam_size=None, maxlen=None, prefix_tokens=None):
    """Generate a batch of translations (no-grad wrapper around _generate)."""
    with torch.no_grad():
        return self._generate(
            src_tokens,
            src_lengths,
            beam_size=beam_size,
            maxlen=maxlen,
            prefix_tokens=prefix_tokens,
        )
def _generate(self, src_tokens, src_lengths, beam_size=None, maxlen=None, prefix_tokens=None):
    """Run beam search (or sampling) and return, per sentence, a list of
    finalized hypothesis dicts sorted by descending score.

    Each hypothesis dict has keys: 'tokens', 'score', 'attention',
    'alignment', 'positional_scores'.
    """
    bsz, srclen = src_tokens.size()
    maxlen = min(maxlen, self.maxlen) if maxlen is not None else self.maxlen

    # the max beam size is the dictionary size - 1, since we never select pad
    beam_size = beam_size if beam_size is not None else self.beam_size
    beam_size = min(beam_size, self.vocab_size - 1)

    encoder_outs = []
    incremental_states = {}
    for model in self.models:
        if not self.retain_dropout:
            model.eval()
        if isinstance(model.decoder, FairseqIncrementalDecoder):
            incremental_states[model] = {}
        else:
            incremental_states[model] = None

        # compute the encoder output for each beam
        encoder_out = model.encoder(src_tokens, src_lengths)
        # tile the encoder output beam_size times along the batch dimension
        encoder_out['encoder_out'] = encoder_out['encoder_out'].repeat(1, 1, beam_size).view(
            srclen, bsz * beam_size, -1)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = encoder_out['encoder_padding_mask'].repeat(1, beam_size).view(
                -1, srclen)
        encoder_outs.append(encoder_out)

    # initialize buffers
    scores = src_tokens.data.new(bsz * beam_size, maxlen + 1).float().fill_(0)
    scores_buf = scores.clone()
    tokens = src_tokens.data.new(bsz * beam_size, maxlen + 2).fill_(self.pad)
    tokens_buf = tokens.clone()
    tokens[:, 0] = self.eos  # every hypothesis starts from EOS
    attn = scores.new(bsz * beam_size, src_tokens.size(1), maxlen + 2)
    attn_buf = attn.clone()

    # list of completed sentences
    finalized = [[] for i in range(bsz)]
    finished = [False for i in range(bsz)]
    worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)]
    num_remaining_sent = bsz

    # number of candidate hypos per step
    cand_size = 2 * beam_size  # 2 x beam size in case half are EOS

    # offset arrays for converting between different indexing schemes
    bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
    cand_offsets = torch.arange(0, cand_size).type_as(tokens)

    # helper function for allocating buffers on the fly
    buffers = {}

    def buffer(name, type_of=tokens):  # noqa
        if name not in buffers:
            buffers[name] = type_of.new()
        return buffers[name]

    def is_finished(sent, step, unfinalized_scores=None):
        """
        Check whether we've finished generation for a given sentence, by
        comparing the worst score among finalized hypotheses to the best
        possible score among unfinalized hypotheses.
        """
        assert len(finalized[sent]) <= beam_size
        if len(finalized[sent]) == beam_size:
            if self.stop_early or step == maxlen or unfinalized_scores is None:
                return True
            # stop if the best unfinalized score is worse than the worst
            # finalized one
            best_unfinalized_score = unfinalized_scores[sent].max()
            if self.normalize_scores:
                best_unfinalized_score /= maxlen ** self.len_penalty
            if worst_finalized[sent]['score'] >= best_unfinalized_score:
                return True
        return False

    def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
        """
        Finalize the given hypotheses at this step, while keeping the total
        number of finalized hypotheses per sentence <= beam_size.

        Note: the input must be in the desired finalization order, so that
        hypotheses that appear earlier in the input are preferred to those
        that appear later.

        Args:
            step: current time step
            bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                indicating which hypotheses to finalize
            eos_scores: A vector of the same size as bbsz_idx containing
                scores for each hypothesis
            unfinalized_scores: A vector containing scores for all
                unfinalized hypotheses
        """
        assert bbsz_idx.numel() == eos_scores.numel()

        # clone relevant token and attention tensors
        tokens_clone = tokens.index_select(0, bbsz_idx)
        tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is EOS
        tokens_clone[:, step] = self.eos
        attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2]

        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]

        # normalize sentence-level scores
        if self.normalize_scores:
            eos_scores /= (step + 1) ** self.len_penalty

        # cum_unfin maps "unfinished" sentence indices to an offset that
        # recovers the original (pre-removal) sentence index.
        cum_unfin = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)

        sents_seen = set()
        for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
            unfin_idx = idx // beam_size
            sent = unfin_idx + cum_unfin[unfin_idx]

            sents_seen.add((sent, unfin_idx))

            def get_hypo():
                # remove padding tokens from attn scores
                nonpad_idxs = src_tokens[sent].ne(self.pad)
                hypo_attn = attn_clone[i][nonpad_idxs]
                _, alignment = hypo_attn.max(dim=0)
                return {
                    'tokens': tokens_clone[i],
                    'score': score,
                    'attention': hypo_attn,  # src_len x tgt_len
                    'alignment': alignment,
                    'positional_scores': pos_scores[i],
                }

            if len(finalized[sent]) < beam_size:
                finalized[sent].append(get_hypo())
            elif not self.stop_early and score > worst_finalized[sent]['score']:
                # replace worst hypo for this sentence with new/better one
                worst_idx = worst_finalized[sent]['idx']
                if worst_idx is not None:
                    finalized[sent][worst_idx] = get_hypo()

                # find new worst finalized hypo for this sentence
                idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
                worst_finalized[sent] = {
                    'score': s['score'],
                    'idx': idx,
                }

        newly_finished = []
        for sent, unfin_idx in sents_seen:
            # check termination conditions for this sentence
            if not finished[sent] and is_finished(sent, step, unfinalized_scores):
                finished[sent] = True
                newly_finished.append(unfin_idx)
        return newly_finished

    reorder_state = None
    batch_idxs = None
    for step in range(maxlen + 1):  # one extra step for EOS marker
        # reorder decoder internal states based on the prev choice of beams
        if reorder_state is not None:
            if batch_idxs is not None:
                # update beam indices to take into account removed sentences
                corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
                reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
            for i, model in enumerate(self.models):
                if isinstance(model.decoder, FairseqIncrementalDecoder):
                    model.decoder.reorder_incremental_state(incremental_states[model], reorder_state)
                encoder_outs[i] = model.decoder.reorder_encoder_out(encoder_outs[i], reorder_state)

        probs, avg_attn_scores = self._decode(
            tokens[:, :step + 1], encoder_outs, incremental_states)
        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            probs = probs.unfold(0, 1, beam_size).squeeze(2).contiguous()
            scores = scores.type_as(probs)
            scores_buf = scores_buf.type_as(probs)
        elif not self.sampling:
            # make probs contain cumulative scores for each hypothesis
            probs.add_(scores[:, step - 1].view(-1, 1))

        probs[:, self.pad] = -math.inf  # never select pad
        probs[:, self.unk] -= self.unk_penalty  # apply unk penalty

        # Record attention scores
        attn[:, :, step + 1].copy_(avg_attn_scores)

        cand_scores = buffer('cand_scores', type_of=scores)
        cand_indices = buffer('cand_indices')
        cand_beams = buffer('cand_beams')
        eos_bbsz_idx = buffer('eos_bbsz_idx')
        eos_scores = buffer('eos_scores', type_of=scores)
        if step < maxlen:
            if prefix_tokens is not None and step < prefix_tokens.size(1):
                # force the next token to follow the supplied prefix
                probs_slice = probs.view(bsz, -1, probs.size(-1))[:, 0, :]
                cand_scores = torch.gather(
                    probs_slice, dim=1,
                    index=prefix_tokens[:, step].view(-1, 1).data
                ).expand(-1, cand_size)
                cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, cand_size).data
                cand_beams.resize_as_(cand_indices).fill_(0)
            elif self.sampling:
                assert self.pad == 1, 'sampling assumes the first two symbols can be ignored'

                if self.sampling_topk > 0:
                    # sample only from the top-k most likely tokens
                    values, indices = probs[:, 2:].topk(self.sampling_topk)
                    exp_probs = values.div_(self.sampling_temperature).exp()
                    if step == 0:
                        torch.multinomial(exp_probs, beam_size, replacement=True, out=cand_indices)
                    else:
                        torch.multinomial(exp_probs, 1, replacement=True, out=cand_indices)
                    torch.gather(exp_probs, dim=1, index=cand_indices, out=cand_scores)
                    torch.gather(indices, dim=1, index=cand_indices, out=cand_indices)
                    cand_indices.add_(2)  # undo the [:, 2:] slice offset
                else:
                    exp_probs = probs.div_(self.sampling_temperature).exp_().view(-1, self.vocab_size)

                    if step == 0:
                        # we exclude the first two vocab items, one of which is pad
                        torch.multinomial(exp_probs[:, 2:], beam_size, replacement=True, out=cand_indices)
                    else:
                        torch.multinomial(exp_probs[:, 2:], 1, replacement=True, out=cand_indices)
                    cand_indices.add_(2)
                    torch.gather(exp_probs, dim=1, index=cand_indices, out=cand_scores)

                cand_scores.log_()
                cand_indices = cand_indices.view(bsz, -1).repeat(1, 2)
                cand_scores = cand_scores.view(bsz, -1).repeat(1, 2)
                if step == 0:
                    cand_beams = torch.zeros(bsz, cand_size).type_as(cand_indices)
                else:
                    cand_beams = torch.arange(0, beam_size).repeat(bsz, 2).type_as(cand_indices)
                    # make scores cumulative
                    cand_scores.add_(
                        torch.gather(
                            scores[:, step - 1].view(bsz, beam_size), dim=1,
                            index=cand_beams,
                        )
                    )
            else:
                # take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                torch.topk(
                    probs.view(bsz, -1),
                    k=min(cand_size, probs.view(bsz, -1).size(1) - 1),  # -1 so we never select pad
                    out=(cand_scores, cand_indices),
                )
                # flat candidate index -> (beam, vocab) coordinates
                torch.div(cand_indices, self.vocab_size, out=cand_beams)
                cand_indices.fmod_(self.vocab_size)
        else:
            # finalize all active hypotheses once we hit maxlen
            # pick the hypothesis with the highest prob of EOS right now
            torch.sort(
                probs[:, self.eos],
                descending=True,
                out=(eos_scores, eos_bbsz_idx),
            )
            num_remaining_sent -= len(finalize_hypos(
                step, eos_bbsz_idx, eos_scores))
            assert num_remaining_sent == 0
            break

        # cand_bbsz_idx contains beam indices for the top candidate
        # hypotheses, with a range of values: [0, bsz*beam_size),
        # and dimensions: [bsz, cand_size]
        cand_bbsz_idx = cand_beams.add(bbsz_offsets)

        # finalize hypotheses that end in eos
        eos_mask = cand_indices.eq(self.eos)

        finalized_sents = set()
        if step >= self.minlen:
            # only consider eos when it's among the top beam_size indices
            torch.masked_select(
                cand_bbsz_idx[:, :beam_size],
                mask=eos_mask[:, :beam_size],
                out=eos_bbsz_idx,
            )
            if eos_bbsz_idx.numel() > 0:
                torch.masked_select(
                    cand_scores[:, :beam_size],
                    mask=eos_mask[:, :beam_size],
                    out=eos_scores,
                )
                finalized_sents = finalize_hypos(
                    step, eos_bbsz_idx, eos_scores, cand_scores)
                num_remaining_sent -= len(finalized_sents)

        assert num_remaining_sent >= 0
        if num_remaining_sent == 0:
            break
        assert step < maxlen

        if len(finalized_sents) > 0:
            # some sentences just finished: shrink the batch
            new_bsz = bsz - len(finalized_sents)

            # construct batch_idxs which holds indices of batches to keep for the next pass
            batch_mask = torch.ones(bsz).type_as(cand_indices)
            batch_mask[cand_indices.new(finalized_sents)] = 0
            batch_idxs = batch_mask.nonzero().squeeze(-1)

            eos_mask = eos_mask[batch_idxs]
            cand_beams = cand_beams[batch_idxs]
            bbsz_offsets.resize_(new_bsz, 1)
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)

            cand_scores = cand_scores[batch_idxs]
            cand_indices = cand_indices[batch_idxs]
            if prefix_tokens is not None:
                prefix_tokens = prefix_tokens[batch_idxs]

            scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            scores_buf.resize_as_(scores)
            tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            tokens_buf.resize_as_(tokens)
            attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
            attn_buf.resize_as_(attn)
            bsz = new_bsz
        else:
            batch_idxs = None

        # set active_mask so that values > cand_size indicate eos hypos
        # and values < cand_size indicate candidate active hypos.
        # After, the min values per row are the top candidate active hypos
        active_mask = buffer('active_mask')
        torch.add(
            eos_mask.type_as(cand_offsets) * cand_size,
            cand_offsets[:eos_mask.size(1)],
            out=active_mask,
        )

        # get the top beam_size active hypotheses, which are just the hypos
        # with the smallest values in active_mask
        active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore')
        torch.topk(
            active_mask, k=beam_size, dim=1, largest=False,
            out=(_ignore, active_hypos)
        )
        active_bbsz_idx = buffer('active_bbsz_idx')
        torch.gather(
            cand_bbsz_idx, dim=1, index=active_hypos,
            out=active_bbsz_idx,
        )
        active_scores = torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores[:, step].view(bsz, beam_size),
        )
        active_bbsz_idx = active_bbsz_idx.view(-1)
        active_scores = active_scores.view(-1)

        # copy tokens and scores for active hypotheses
        torch.index_select(
            tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
            out=tokens_buf[:, :step + 1],
        )
        torch.gather(
            cand_indices, dim=1, index=active_hypos,
            out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
        )
        if step > 0:
            torch.index_select(
                scores[:, :step], dim=0, index=active_bbsz_idx,
                out=scores_buf[:, :step],
            )
        torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
        )

        # copy attention for active hypotheses
        torch.index_select(
            attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
            out=attn_buf[:, :, :step + 2],
        )

        # swap buffers
        tokens, tokens_buf = tokens_buf, tokens
        scores, scores_buf = scores_buf, scores
        attn, attn_buf = attn_buf, attn

        # reorder incremental state in decoder
        reorder_state = active_bbsz_idx

    # sort by score descending
    for sent in range(len(finalized)):
        finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)

    return finalized
def _decode(self, tokens, encoder_outs, incremental_states):
    """Decode one step with the ensemble, averaging the member models'
    probabilities (and attention, when available)."""
    n_models = len(self.models)
    if n_models == 1:
        # Single model: ask for log-probabilities directly, no averaging.
        return self._decode_one(tokens, self.models[0], encoder_outs[0], incremental_states, log_probs=True)

    summed_probs = None
    summed_attn = None
    for member, enc_out in zip(self.models, encoder_outs):
        probs, attn = self._decode_one(tokens, member, enc_out, incremental_states, log_probs=False)
        # accumulate in place to avoid allocating a fresh tensor per model
        summed_probs = probs if summed_probs is None else summed_probs.add_(probs)
        if attn is not None:
            summed_attn = attn if summed_attn is None else summed_attn.add_(attn)

    summed_probs.div_(n_models)
    summed_probs.log_()
    if summed_attn is not None:
        summed_attn.div_(n_models)
    return summed_probs, summed_attn
def _decode_one(self, tokens, model, encoder_out, incremental_states, log_probs):
    """Run a single model's decoder for the latest time step.

    Returns (probs, attn) where probs covers only the final position and
    is normalized (log or plain) per ``log_probs``; attn may be None.
    """
    with torch.no_grad():
        if incremental_states[model] is not None:
            # incremental decoders only need the new token plus cached state
            decoder_out = list(model.decoder(tokens, encoder_out, incremental_states[model]))
        else:
            decoder_out = list(model.decoder(tokens, encoder_out))
        # keep only the prediction for the last position
        decoder_out[0] = decoder_out[0][:, -1, :]
        attn = decoder_out[1]
        if attn is not None:
            attn = attn[:, -1, :]
    probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
    return probs, attn
| 45.421053
| 116
| 0.55413
|
4a087bdb7f8c28c71ab6f6b6d0b8ab7e17126e89
| 1,909
|
py
|
Python
|
Numpy-Project/code.py
|
shaileshcgautam/greyatom-python-for-data-science
|
24dbe6c936be9e294cabf2e724e2ce91c4318210
|
[
"MIT"
] | null | null | null |
Numpy-Project/code.py
|
shaileshcgautam/greyatom-python-for-data-science
|
24dbe6c936be9e294cabf2e724e2ce91c4318210
|
[
"MIT"
] | null | null | null |
Numpy-Project/code.py
|
shaileshcgautam/greyatom-python-for-data-science
|
24dbe6c936be9e294cabf2e724e2ce91c4318210
|
[
"MIT"
] | null | null | null |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=np.array([[50, 9, 4, 1, 0, 0, 40, 0]])

#Code starts here
# NOTE(review): `path` is supplied by the grading platform (see the comment
# above) and points at the census CSV; confirm before running standalone.
data=np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
# Append the new record as one extra row to the loaded dataset.
census = np.concatenate((data, new_record), axis=0)
print(census)
# --------------
# Code starts here
# Column 0 of the census matrix holds each citizen's age.
age = census[:, 0]
print(age)
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = age.std()
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
# --------------
# Code starts here
# Subset rows by race code (column 2) and count them. The original repeated
# this mask-and-count block five times and resolved the minimum with a
# five-way if chain; a loop removes the duplication while keeping every
# variable name the grader inspects.
race_subsets = [census[census[:, 2] == code] for code in range(5)]
race_0, race_1, race_2, race_3, race_4 = race_subsets
len_0, len_1, len_2, len_3, len_4 = (len(subset) for subset in race_subsets)
print(len_0)

lengths = np.array([len_0, len_1, len_2, len_3, len_4])
min_race_len = lengths.min()


def minority():
    """Return the race code with the fewest citizens.

    np.argmin returns the first index holding the minimum, matching the
    original if-chain's lowest-code-wins tie-breaking.
    """
    return int(np.argmin(lengths))


minority_race = minority()
print(minority())
# --------------
# Code starts here
# Citizens older than 60; column 6 holds hours worked per week.
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens[:, 6].sum()
print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
# Code starts here
# Split on education level (column 1); column 7 holds the pay indicator.
# Both comparisons are kept explicit (rather than negating one mask) so
# rows with NaN education fall out of BOTH groups, as in the original.
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = np.mean(high[:, 7])
avg_pay_low = np.mean(low[:, 7])
print(avg_pay_high)
print(avg_pay_low)
| 21.449438
| 61
| 0.624411
|
4a087c4fb54f3c05b161d3f15438dde81b318d0e
| 13,362
|
py
|
Python
|
newrelic/core/trace_cache.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 92
|
2020-06-12T17:53:23.000Z
|
2022-03-01T11:13:21.000Z
|
newrelic/core/trace_cache.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 347
|
2020-07-10T00:10:19.000Z
|
2022-03-31T17:58:56.000Z
|
newrelic/core/trace_cache.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 58
|
2020-06-17T13:51:57.000Z
|
2022-03-06T14:26:53.000Z
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a global cache for tracking any traces.
"""
import logging
import random
import sys
import threading
import traceback
import weakref
try:
import thread
except ImportError:
import _thread as thread
from newrelic.core.config import global_settings
from newrelic.core.loop_node import LoopNode
_logger = logging.getLogger(__name__)
def current_task(asyncio):
    """Return the asyncio task currently running, or None.

    Works across asyncio API generations: prefers the module-level
    ``asyncio.current_task`` (3.7+) and falls back to the legacy
    ``asyncio.Task.current_task``. Returns None when *asyncio* is falsy,
    when no accessor exists, or when no event loop is running.
    """
    if not asyncio:
        return

    current_task = getattr(asyncio, "current_task", None)
    if current_task is None:
        current_task = getattr(asyncio.Task, "current_task", None)

    try:
        return current_task()
    except Exception:
        # No running event loop (RuntimeError) or no accessor at all
        # (TypeError from calling None): treat as "no current task".
        # The original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catching Exception keeps those propagating.
        pass
def all_tasks(asyncio):
    """Return the set of unfinished tasks for the running loop, or None.

    Mirrors ``current_task`` above: prefers the module-level
    ``asyncio.all_tasks`` (3.7+), falls back to the legacy
    ``asyncio.Task.all_tasks``, and returns None when *asyncio* is falsy,
    no accessor exists, or no event loop is running.
    """
    if not asyncio:
        return

    all_tasks = getattr(asyncio, "all_tasks", None)
    if all_tasks is None:
        all_tasks = getattr(asyncio.Task, "all_tasks", None)

    try:
        return all_tasks()
    except Exception:
        # No running event loop or no accessor; treat as "no tasks".
        # Catch Exception rather than a bare except so KeyboardInterrupt
        # and SystemExit are not swallowed.
        pass
def get_event_loop(task):
    """Return the event loop a task is bound to, or None.

    Modern tasks expose ``get_loop()``; older ones keep the loop in the
    private ``_loop`` attribute.
    """
    getter = getattr(task, "get_loop", None)
    if getter:
        return getter()
    return getattr(task, "_loop", None)
class cached_module(object):
    """Descriptor exposing an already-imported module as a lazy attribute.

    On instance access it looks the module up in ``sys.modules``; if the
    module has been imported, the result is written into the instance's
    ``__dict__`` so later lookups bypass the descriptor entirely.
    """

    def __init__(self, module_path, name=None):
        self.module_path = module_path
        self.name = name or module_path

    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        module = sys.modules.get(self.module_path)
        if module:
            # Cache on the instance; instance __dict__ shadows the descriptor.
            instance.__dict__[self.name] = module
            return module
class TraceCacheNoActiveTraceError(RuntimeError):
    """Raised when an operation requires an active trace but none is cached
    for the current thread/coroutine."""
    pass


class TraceCacheActiveTraceError(RuntimeError):
    """Raised when saving a trace would clobber a different, still-active
    trace already cached for the same thread/coroutine."""
    pass
class TraceCache(object):
asyncio = cached_module("asyncio")
greenlet = cached_module("greenlet")
def __init__(self):
    # Maps thread/greenlet/task id -> active trace. Weak values let traces
    # drop out of the cache automatically once nothing else references them.
    self._cache = weakref.WeakValueDictionary()
def __repr__(self):
    """Debug representation including the cached {id: trace} mapping."""
    contents = str(dict(self._cache.items()))
    return "<%s object at 0x%x %s>" % (type(self).__name__, id(self), contents)
def current_thread_id(self):
    """Returns the thread ID for the caller.

    When greenlets are present and we detect we are running in the
    greenlet then we use the greenlet ID instead of the thread ID.
    Likewise, inside an asyncio task the task's id() is used so traces
    follow the coroutine rather than the host thread.
    """
    if self.greenlet:
        # Greenlet objects are maintained in a tree structure with
        # the 'parent' attribute pointing to that which a specific
        # instance is associated with. Only the root node has no
        # parent. This node is special and is the one which
        # corresponds to the original thread where the greenlet
        # module was imported and initialised. That root greenlet is
        # never actually running and we should always ignore it. In
        # all other cases where we can obtain a current greenlet,
        # then it should indicate we are running as a greenlet.

        current = self.greenlet.getcurrent()
        if current is not None and current.parent:
            return id(current)

    if self.asyncio:
        task = current_task(self.asyncio)
        if task is not None:
            return id(task)

    # Plain OS thread: fall back to the interpreter thread identifier.
    return thread.get_ident()
def task_start(self, task):
    """Propagate the currently active trace to *task*, keyed by its id."""
    active = self.current_trace()
    if active:
        self._cache[id(task)] = active
def task_stop(self, task):
    # Drop the task's cache entry if present; a missing id is a no-op.
    self._cache.pop(id(task), None)
def current_transaction(self):
    """Return the transaction object if one exists for the currently
    executing thread, else None."""
    active = self._cache.get(self.current_thread_id())
    if not active:
        return active
    return active.transaction
def current_trace(self):
    # Look up the active trace under the caller's thread/greenlet/task id.
    return self._cache.get(self.current_thread_id())
def active_threads(self):
    """Returns an iterator over all current stack frames for all
    active threads in the process. The result for each is a tuple
    consisting of the thread identifier, a categorisation of the
    type of thread, and the stack frame. Note that we actually treat
    any greenlets as threads as well. In that case the thread ID is
    the id() of the greenlet.

    This is in this class for convenience as needs to access the
    currently active transactions to categorise transaction threads
    as being for web transactions or background tasks.
    """

    # First yield up those for real Python threads.

    for thread_id, frame in sys._current_frames().items():
        trace = self._cache.get(thread_id)
        transaction = trace and trace.transaction
        if transaction is not None:
            if transaction.background_task:
                yield transaction, thread_id, "BACKGROUND", frame
            else:
                yield transaction, thread_id, "REQUEST", frame
        else:
            # Note that there may not always be a thread object.
            # This is because thread could have been created direct
            # against the thread module rather than via the high
            # level threading module. Categorise anything we can't
            # obtain a name for as being 'OTHER'.
            # NOTE(review): the local name `thread` shadows the
            # module-level `thread` import for the rest of this loop
            # body; harmless here but worth renaming in a code pass.
            thread = threading._active.get(thread_id)
            if thread is not None and thread.getName().startswith("NR-"):
                yield None, thread_id, "AGENT", frame
            else:
                yield None, thread_id, "OTHER", frame

    # Now yield up those corresponding to greenlets. Right now only
    # doing this for greenlets in which any active transactions are
    # running. We don't have a way of knowing what non transaction
    # threads are running.

    debug = global_settings().debug
    if debug.enable_coroutine_profiling:
        for thread_id, trace in self._cache.items():
            transaction = trace.transaction
            if transaction and transaction._greenlet is not None:
                gr = transaction._greenlet()
                if gr and gr.gr_frame is not None:
                    if transaction.background_task:
                        yield (transaction, thread_id, "BACKGROUND", gr.gr_frame)
                    else:
                        yield (transaction, thread_id, "REQUEST", gr.gr_frame)
def prepare_for_root(self):
    """Updates the cache state so that a new root can be created if the
    trace in the cache is from a different task (for asyncio). Returns the
    current trace after the cache is updated."""
    thread_id = self.current_thread_id()
    trace = self._cache.get(thread_id)
    if not trace:
        return None

    # Only traces saved from within an asyncio task carry a _task
    # attribute (set in save_trace); non-asyncio traces are left alone.
    if not hasattr(trace, "_task"):
        return trace

    task = current_task(self.asyncio)
    if task is not None and id(trace._task) != id(task):
        # Cached trace belongs to a different task: evict it so this
        # task can start its own root.
        self._cache.pop(thread_id, None)
        return None

    if trace.root and trace.root.exited:
        # The cached trace's root already finished; clear it out.
        self._cache.pop(thread_id, None)
        return None

    return trace
def save_trace(self, trace):
    """Saves the specified trace away under the thread ID of
    the current executing thread. Will also cache a reference to the
    greenlet if using coroutines. This is so we can later determine
    the stack trace for a transaction when using greenlets.

    Raises TraceCacheActiveTraceError if a different, still-active trace
    is already cached under the same thread ID.
    """
    thread_id = trace.thread_id

    if thread_id in self._cache:
        cache_root = self._cache[thread_id].root
        if cache_root and cache_root is not trace.root and not cache_root.exited:
            # Cached trace exists and has a valid root still
            _logger.error(
                "Runtime instrumentation error. Attempt to "
                "save a trace from an inactive transaction. "
                "Report this issue to New Relic support.\n%s",
                "".join(traceback.format_stack()[:-1]),
            )
            raise TraceCacheActiveTraceError("transaction already active")

    self._cache[thread_id] = trace

    # We judge whether we are actually running in a coroutine by
    # seeing if the current thread ID is actually listed in the set
    # of all current frames for executing threads. If we are
    # executing within a greenlet, then thread.get_ident() will
    # return the greenlet identifier. This will not be a key in
    # dictionary of all current frames because that will still be
    # the original standard thread which all greenlets are running
    # within.

    trace._greenlet = None

    if hasattr(sys, "_current_frames"):
        if thread_id not in sys._current_frames():
            if self.greenlet:
                # weakref so the cache never keeps the greenlet alive
                trace._greenlet = weakref.ref(self.greenlet.getcurrent())
            if self.asyncio and not hasattr(trace, "_task"):
                task = current_task(self.asyncio)
                trace._task = task
def pop_current(self, trace):
    """Restore the trace's parent under the thread ID of the current
    executing thread."""
    if hasattr(trace, "_task"):
        # trace is leaving scope; drop its task association
        delattr(trace, "_task")
    self._cache[trace.thread_id] = trace.parent
    def complete_root(self, root):
        """Completes a trace specified by the given root

        Drops the specified root, validating that it is
        actually saved away under the current executing thread.

        :param root: the root span/trace to complete.
        :raises TraceCacheNoActiveTraceError: if neither the root's own
            thread ID nor the current thread ID has an entry in the cache.
        :raises RuntimeError: if the cached entry is not *root* itself.
        """
        if hasattr(root, "_task"):
            if root.has_outstanding_children():
                # Force-exit any still-open child traces that belong to other
                # asyncio tasks but share this root, walking up each parent
                # chain (stopping short of the root itself).
                task_ids = (id(task) for task in all_tasks(self.asyncio))
                to_complete = []
                for task_id in task_ids:
                    entry = self._cache.get(task_id)
                    if entry and entry is not root and entry.root is root:
                        to_complete.append(entry)
                while to_complete:
                    entry = to_complete.pop()
                    if entry.parent and entry.parent is not root:
                        to_complete.append(entry.parent)
                    entry.__exit__(None, None, None)
            root._task = None
        thread_id = root.thread_id
        if thread_id not in self._cache:
            # The root may have been recorded under a coroutine ID; fall back
            # to the current thread's ID before giving up.
            thread_id = self.current_thread_id()
            if thread_id not in self._cache:
                raise TraceCacheNoActiveTraceError("no active trace")
        current = self._cache.get(thread_id)
        if root is not current:
            _logger.error(
                "Runtime instrumentation error. Attempt to "
                "drop the root when it is not the current "
                "trace. Report this issue to New Relic support.\n%s",
                "".join(traceback.format_stack()[:-1]),
            )
            raise RuntimeError("not the current trace")
        del self._cache[thread_id]
        # Release the greenlet weakref so nothing lingers after completion.
        root._greenlet = None
    def record_event_loop_wait(self, start_time, end_time):
        """Attribute a blocking event-loop stall to concurrent transactions.

        Adds the stall duration to the current transaction's loop time and,
        when the stall exceeds the configured blocking threshold, subtracts
        it from the exclusive time of leaf traces belonging to *other*
        transactions on the same event loop, then attaches a LoopNode to
        each affected root.

        :param start_time: wall-clock time the stall began.
        :param end_time: wall-clock time the stall ended.
        """
        transaction = self.current_transaction()
        if not transaction or not transaction.settings:
            return
        settings = transaction.settings.event_loop_visibility
        if not settings.enabled:
            return
        duration = end_time - start_time
        transaction._loop_time += duration
        # Short stalls are accounted in _loop_time but not reported as nodes.
        if duration < settings.blocking_threshold:
            return
        fetch_name = transaction._cached_path.path
        roots = set()
        seen = set()
        task = getattr(transaction.root_span, "_task", None)
        loop = get_event_loop(task)
        for trace in self._cache.values():
            if trace in seen:
                continue
            # If the trace is on a different transaction and it's asyncio
            if (
                trace.transaction is not transaction
                and getattr(trace, "_task", None) is not None
                and get_event_loop(trace._task) is loop
                and trace._is_leaf()
            ):
                trace.exclusive -= duration
                roots.add(trace.root)
                seen.add(trace)
        seen = None
        for root in roots:
            # Random 64-bit GUID, hex-encoded, for the synthetic loop node.
            guid = "%016x" % random.getrandbits(64)
            node = LoopNode(
                fetch_name=fetch_name,
                start_time=start_time,
                end_time=end_time,
                duration=duration,
                guid=guid,
            )
            transaction = root.transaction
            transaction._process_node(node)
            root.increment_child_count()
            root.add_child(node)
# Process-wide singleton shared by all instrumentation hooks.
_trace_cache = TraceCache()

def trace_cache():
    """Return the process-wide TraceCache singleton."""
    return _trace_cache

def greenlet_loaded(module):
    """Record the imported greenlet module so traces can be tied to greenlets."""
    _trace_cache.greenlet = module

def asyncio_loaded(module):
    """Record the imported asyncio module so traces can be tied to tasks."""
    _trace_cache.asyncio = module
| 32.830467
| 109
| 0.60949
|
4a087d16ed3813ee0c2f401795714e111be1f6d1
| 35,253
|
py
|
Python
|
src/emhass/forecast.py
|
davidusb-geek/emhass
|
5d6a5ad45c26b819c6bc1cb0e8943940d7fc8f17
|
[
"MIT"
] | 17
|
2021-09-12T22:32:09.000Z
|
2022-03-17T17:45:29.000Z
|
src/emhass/forecast.py
|
davidusb-geek/emhass
|
5d6a5ad45c26b819c6bc1cb0e8943940d7fc8f17
|
[
"MIT"
] | 1
|
2021-12-22T21:10:04.000Z
|
2021-12-22T21:10:04.000Z
|
src/emhass/forecast.py
|
davidusb-geek/emhass
|
5d6a5ad45c26b819c6bc1cb0e8943940d7fc8f17
|
[
"MIT"
] | 2
|
2021-11-03T10:29:05.000Z
|
2021-11-19T12:08:24.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Optional
import pathlib, pickle, copy, logging, json
import pandas as pd, numpy as np
from datetime import datetime, timedelta
import requests
from bs4 import BeautifulSoup
import pvlib
from pvlib.pvsystem import PVSystem
from pvlib.location import Location
from pvlib.modelchain import ModelChain
from pvlib.temperature import TEMPERATURE_MODEL_PARAMETERS
from pvlib.irradiance import disc
from emhass.retrieve_hass import retrieve_hass
from emhass.utils import get_days_list
class forecast:
"""
Generate weather, load and costs forecasts needed as inputs to the optimization.
In EMHASS we have basically 4 forecasts to deal with:
- PV power production forecast (internally based on the weather forecast and the \
characteristics of your PV plant). This is given in Watts.
- Load power forecast: how much power your house will demand on the next 24h. This \
is given in Watts.
- PV production selling price forecast: at what price are you selling your excess \
PV production on the next 24h. This is given in EUR/kWh.
- Load cost forecast: the price of the energy from the grid on the next 24h. This \
is given in EUR/kWh.
The weather forecast is obtained from two methods. The first method
uses a scrapper to the ClearOutside webpage which proposes detailed forecasts
based on Lat/Lon locations. This method seems quite stable but as with any scrape
method it will fail if any changes are made to the webpage API. The second method
    for weather forecast is using a direct read from a CSV file. With this method we
will consider that we are reading the PV power directly.
The 'get_power_from_weather' method is proposed here to convert from irradiance
data to electrical power. Again PVLib is used to model the PV plant.
For the load forecast two methods are available. The first method allows the user
to use a CSV file with their own forecast. With this method a more powerful
external package for time series forecast may be used to create your own detailed
    load forecast. The second method is a naive method, also called persistence.
It simply assumes that the forecast for a future period will be equal to the
observed values in a past period. The past period is controlled using
parameter 'delta_forecast'.
For the PV production selling price and Load cost forecasts the privileged method
is a direct read from a user provided CSV file.
For all the forecasting methods, the CSV file should contain no header and the
timestamped data should have the following format:
2021-04-29 00:00:00+00:00,287.07
2021-04-29 00:30:00+00:00,274.27
2021-04-29 01:00:00+00:00,243.38
...
The data columns in these files will correspond to the data in the units expected
for each forecasting method.
"""
def __init__(self, retrieve_hass_conf: dict, optim_conf: dict, plant_conf: dict,
params: str, config_path: str, logger: logging.Logger,
opt_time_delta: Optional[int] = 24,
get_data_from_file: Optional[bool] = False) -> None:
"""
Define constructor for the forecast class.
:param retrieve_hass_conf: Dictionnary containing the needed configuration
data from the configuration file, specific to retrieve data from HASS
:type retrieve_hass_conf: dict
:param optim_conf: Dictionnary containing the needed configuration
data from the configuration file, specific for the optimization task
:type optim_conf: dict
:param plant_conf: Dictionnary containing the needed configuration
data from the configuration file, specific for the modeling of the PV plant
:type plant_conf: dict
:param params: Configuration parameters passed from data/options.json
:type params: str
:param config_path: The path to the yaml configuration file
:type config_path: str
:param logger: The passed logger object
:type logger: logging object
:param opt_time_delta: The time delta in hours used to generate forecasts,
a value of 24 will generate 24 hours of forecast data, defaults to 24
:type opt_time_delta: int, optional
:param get_data_from_file: Select if data should be retrieved from a
previously saved pickle useful for testing or directly from connection to
hass database
:type get_data_from_file: bool, optional
"""
self.retrieve_hass_conf = retrieve_hass_conf
self.optim_conf = optim_conf
self.plant_conf = plant_conf
self.freq = self.retrieve_hass_conf['freq']
self.time_zone = self.retrieve_hass_conf['time_zone']
self.method_ts_round = self.retrieve_hass_conf['method_ts_round']
self.timeStep = self.freq.seconds/3600 # in hours
self.time_delta = pd.to_timedelta(opt_time_delta, "hours") # The period of optimization
self.var_PV = self.retrieve_hass_conf['var_PV']
self.var_load = self.retrieve_hass_conf['var_load']
self.var_load_new = self.var_load+'_positive'
self.lat = self.retrieve_hass_conf['lat']
self.lon = self.retrieve_hass_conf['lon']
self.root = config_path
self.logger = logger
self.get_data_from_file = get_data_from_file
self.var_load_cost = 'unit_load_cost'
self.var_prod_price = 'unit_prod_price'
if params is None:
self.params = params
else:
self.params = json.loads(params)
if self.method_ts_round == 'nearest':
self.start_forecast = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0)
elif self.method_ts_round == 'first':
self.start_forecast = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0).floor(freq=self.freq)
elif self.method_ts_round == 'last':
self.start_forecast = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0).ceil(freq=self.freq)
else:
self.logger.error("Wrong method_ts_round passed parameter")
self.end_forecast = (self.start_forecast + self.optim_conf['delta_forecast']).replace(microsecond=0)
self.forecast_dates = pd.date_range(start=self.start_forecast,
end=self.end_forecast-self.freq,
freq=self.freq).round(self.freq)
if params is not None:
if 'prediction_horizon' in list(self.params['passed_data'].keys()):
if self.params['passed_data']['prediction_horizon'] is not None:
self.forecast_dates = self.forecast_dates[0:self.params['passed_data']['prediction_horizon']]
def get_weather_forecast(self, method: Optional[str] = 'scrapper',
csv_path: Optional[str] = "/data/data_weather_forecast.csv") -> pd.DataFrame:
"""
Get and generate weather forecast data.
:param method: The desired method, options are 'scrapper' and 'csv', \
defaults to 'scrapper'
:type method: str, optional
:return: The DataFrame containing the forecasted data
:rtype: pd.DataFrame
"""
self.logger.info("Retrieving weather forecast data using method = "+method)
self.weather_forecast_method = method # Saving this attribute for later use to identify csv method usage
if method == 'scrapper':
freq_scrap = pd.to_timedelta(60, "minutes") # The scrapping time step is 60min
forecast_dates_scrap = pd.date_range(start=self.start_forecast,
end=self.end_forecast-freq_scrap,
freq=freq_scrap).round(freq_scrap)
# Using the clearoutside webpage
response = requests.get("https://clearoutside.com/forecast/"+str(round(self.lat, 2))+"/"+str(round(self.lon, 2)))
soup = BeautifulSoup(response.content, 'html.parser')
table = soup.find_all(id='day_0')[0]
list_names = table.find_all(class_='fc_detail_label')
list_tables = table.find_all('ul')[1:]
selected_cols = [0, 1, 2, 3, 10, 12, 15] # Selected variables
col_names = [list_names[i].get_text() for i in selected_cols]
list_tables = [list_tables[i] for i in selected_cols]
# Building the raw DF container
raw_data = pd.DataFrame(index=range(24), columns=col_names, dtype=float)
for count_col, col in enumerate(col_names):
list_rows = list_tables[count_col].find_all('li')
for count_row, row in enumerate(list_rows):
raw_data.loc[count_row, col] = float(row.get_text())
# Treating index
raw_data.set_index(forecast_dates_scrap, inplace=True)
raw_data = raw_data[~raw_data.index.duplicated(keep='first')]
raw_data = raw_data.reindex(self.forecast_dates)
raw_data.interpolate(method='linear', axis=0, limit=None,
limit_direction='both', inplace=True)
# Converting the cloud cover into Global Horizontal Irradiance with a PVLib method
ghi_est = self.cloud_cover_to_irradiance(raw_data['Total Clouds (% Sky Obscured)'])
data = ghi_est
data['temp_air'] = raw_data['Temperature (°C)']
data['wind_speed'] = raw_data['Wind Speed/Direction (mph)']*1.60934 # conversion to km/h
data['relative_humidity'] = raw_data['Relative Humidity (%)']
data['precipitable_water'] = pvlib.atmosphere.gueymard94_pw(
data['temp_air'], data['relative_humidity'])
elif method == 'csv': # reading from a csv file
forecast_dates_csv = self.get_forecast_days_csv()
weather_csv_file_path = self.root + csv_path
# Loading the csv file, we will consider that this is the PV power in W
data = pd.read_csv(weather_csv_file_path, header=None, names=['ts', 'yhat'])
if self.params is not None:
if 'prediction_horizon' not in list(self.params['passed_data'].keys()):
data = pd.concat([data, data], axis=0)
else:
if self.params['passed_data']['prediction_horizon'] is None:
data = pd.concat([data, data], axis=0)
else:
data = pd.concat([data, data], axis=0)
# Check if the passed data has the correct length
if len(data) < len(forecast_dates_csv):
self.logger.error("Passed data from CSV is not long enough")
else:
# Define index and pick correct dates
data.index = forecast_dates_csv
data.drop(['ts'], axis=1, inplace=True)
data = data.copy().loc[self.forecast_dates]
elif method == 'list': # reading a list of values
forecast_dates_csv = self.get_forecast_days_csv()
# Loading data from passed list
data_list = self.params['passed_data']['pv_power_forecast']
if 'prediction_horizon' not in list(self.params['passed_data'].keys()):
data_list = data_list + data_list
else:
if self.params['passed_data']['prediction_horizon'] is None:
data_list = data_list + data_list
else:
data_list = data_list[0:self.params['passed_data']['prediction_horizon']]
# Check if the passed data has the correct length
if len(data_list) < len(forecast_dates_csv) and self.params['passed_data']['prediction_horizon'] is None:
self.logger.error("Passed data from passed list is not long enough")
else:
# Define index and pick correct dates
data_dict = {'ts':forecast_dates_csv, 'yhat':data_list}
data = pd.DataFrame.from_dict(data_dict)
data.index = forecast_dates_csv
data.drop(['ts'], axis=1, inplace=True)
data = data.copy().loc[self.forecast_dates]
else:
self.logger.error("Passed method is not valid")
return data
def cloud_cover_to_irradiance(self, cloud_cover: pd.Series,
offset:Optional[int] = 35) -> pd.DataFrame:
"""Estimates irradiance from cloud cover in the following steps:
1. Determine clear sky GHI using Ineichen model and
climatological turbidity.
2. Estimate cloudy sky GHI using a function of cloud_cover
3. Estimate cloudy sky DNI using the DISC model.
4. Calculate DHI from DNI and GHI.
(This function was copied and modified from PVLib)
:param cloud_cover: Cloud cover in %.
:type cloud_cover: pd.Series
:param offset: Determines the minimum GHI., defaults to 35
:type offset: Optional[int], optional
:return: Estimated GHI, DNI, and DHI.
:rtype: pd.DataFrame
"""
location = Location(latitude=self.lat, longitude=self.lon)
solpos = location.get_solarposition(cloud_cover.index)
cs = location.get_clearsky(cloud_cover.index, model='ineichen',
solar_position=solpos)
# Using only the linear method
offset = offset / 100.
cloud_cover_unit = copy.deepcopy(cloud_cover) / 100.
ghi = (offset + (1 - offset) * (1 - cloud_cover_unit)) * cs['ghi']
# Using disc model
dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
return irrads
@staticmethod
def get_mix_forecast(df_now, df_forecast, alpha, beta, col):
first_fcst = alpha*df_forecast.iloc[0] + beta*df_now[col].iloc[-1]
df_forecast.iloc[0] = first_fcst
return df_forecast
    def get_power_from_weather(self, df_weather: pd.DataFrame,
                               set_mix_forecast:Optional[bool] = False,
                               df_now:Optional[pd.DataFrame] = pd.DataFrame()) -> pd.Series:
        """
        Convert weather forecast data into electrical power.

        :param df_weather: The DataFrame containing the weather forecasted data. \
            This DF should be generated by the 'get_weather_forecast' method or at \
            least contain the same columns names filled with proper data.
        :type df_weather: pd.DataFrame
        :param set_mix_forecast: Use a mixed forecast strategy to integrate now/current values.
        :type set_mix_forecast: Bool, optional
        :param df_now: The DataFrame containing the now/current data.
        :type df_now: pd.DataFrame
        :return: The DataFrame containing the electrical power in Watts
        :rtype: pd.DataFrame
        """
        # If using csv method we consider that yhat is the PV power in W
        if self.weather_forecast_method == 'csv' or self.weather_forecast_method == 'list':
            P_PV_forecast = df_weather['yhat']
            P_PV_forecast.name = None
        else: # We will transform the weather data into electrical power
            # Transform to power (Watts)
            # Setting the main parameters of the PV plant
            location = Location(latitude=self.lat, longitude=self.lon)
            temp_params = TEMPERATURE_MODEL_PARAMETERS['sapm']['close_mount_glass_glass']
            cec_modules = pvlib.pvsystem.retrieve_sam('CECMod')
            cec_inverters = pvlib.pvsystem.retrieve_sam('cecinverter')
            # A list of module models means several PV arrays: model each one
            # and sum their AC outputs.
            if type(self.plant_conf['module_model']) == list:
                P_PV_forecast = pd.Series(0, index=df_weather.index)
                for i in range(len(self.plant_conf['module_model'])):
                    # Selecting correct module and inverter
                    module = cec_modules[self.plant_conf['module_model'][i]]
                    inverter = cec_inverters[self.plant_conf['inverter_model'][i]]
                    # Building the PV system in PVLib
                    system = PVSystem(surface_tilt=self.plant_conf['surface_tilt'][i],
                                      surface_azimuth=self.plant_conf['surface_azimuth'][i],
                                      module_parameters=module,
                                      inverter_parameters=inverter,
                                      temperature_model_parameters=temp_params,
                                      modules_per_string=self.plant_conf['modules_per_string'][i],
                                      strings_per_inverter=self.plant_conf['strings_per_inverter'][i])
                    mc = ModelChain(system, location, aoi_model="physical")
                    # Run the model on the weather DF indexes
                    mc.run_model(df_weather)
                    # Extracting results for AC power
                    P_PV_forecast = P_PV_forecast + mc.results.ac
            else:
                # Selecting correct module and inverter
                module = cec_modules[self.plant_conf['module_model']]
                inverter = cec_inverters[self.plant_conf['inverter_model']]
                # Building the PV system in PVLib
                system = PVSystem(surface_tilt=self.plant_conf['surface_tilt'],
                                  surface_azimuth=self.plant_conf['surface_azimuth'],
                                  module_parameters=module,
                                  inverter_parameters=inverter,
                                  temperature_model_parameters=temp_params,
                                  modules_per_string=self.plant_conf['modules_per_string'],
                                  strings_per_inverter=self.plant_conf['strings_per_inverter'])
                mc = ModelChain(system, location, aoi_model="physical")
                # Run the model on the weather DF indexes
                mc.run_model(df_weather)
                # Extracting results for AC power
                P_PV_forecast = mc.results.ac
        # Optionally blend the first forecast point with the live measurement.
        if set_mix_forecast:
            P_PV_forecast = forecast.get_mix_forecast(
                df_now, P_PV_forecast,
                self.params['passed_data']['alpha'], self.params['passed_data']['beta'], self.var_PV)
        return P_PV_forecast
def get_forecast_days_csv(self, timedelta_days: Optional[int] = 1) -> pd.date_range:
"""
Get the date range vector of forecast dates that will be used when \
loading a CSV file.
:return: The forecast dates vector
:rtype: pd.date_range
"""
start_forecast_csv = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0)
if self.method_ts_round == 'nearest':
start_forecast_csv = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0)
elif self.method_ts_round == 'first':
start_forecast_csv = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0).floor(freq=self.freq)
elif self.method_ts_round == 'last':
start_forecast_csv = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0).ceil(freq=self.freq)
else:
self.logger.error("Wrong method_ts_round passed parameter")
end_forecast_csv = (start_forecast_csv + self.optim_conf['delta_forecast']).replace(microsecond=0)
forecast_dates_csv = pd.date_range(start=start_forecast_csv,
end=end_forecast_csv+timedelta(days=timedelta_days)-self.freq,
freq=self.freq).round(self.freq)
if self.params is not None:
if 'prediction_horizon' in list(self.params['passed_data'].keys()):
if self.params['passed_data']['prediction_horizon'] is not None:
forecast_dates_csv = forecast_dates_csv[0:self.params['passed_data']['prediction_horizon']]
return forecast_dates_csv
    def get_forecast_out_from_csv(self, df_final: pd.DataFrame, forecast_dates_csv: pd.date_range,
                                  csv_path: str, data_list: Optional[list] = None) -> pd.DataFrame:
        """
        Get the forecast data as a DataFrame from a CSV file. The data contained \
        in the CSV file should be a 24h forecast with the same frequency as \
        the main 'freq' parameter in the configuration file. The timestamp \
        will not be used and a new DateTimeIndex is generated to fit the \
        timestamp index of the input data in 'df_final'.

        :param df_final: The DataFrame containing the input data.
        :type df_final: pd.DataFrame
        :param forecast_dates_csv: The forecast dates vector
        :type forecast_dates_csv: pd.date_range
        :param csv_path: The path to the CSV file; when None, *data_list* is
            used as the data source instead.
        :type csv_path: str
        :param data_list: Raw forecast values used when csv_path is None.
        :type data_list: Optional[list]
        :return: The data from the CSV file
        :rtype: pd.DataFrame
        """
        # NOTE(review): grouping by index.day assumes df_final spans less than
        # one month (day numbers unique) — TODO confirm for long horizons.
        days_list = df_final.index.day.unique().tolist()
        if csv_path is None:
            # Build the source frame directly from the passed list of values.
            data_dict = {'ts':forecast_dates_csv, 'yhat':data_list}
            df_csv = pd.DataFrame.from_dict(data_dict)
            df_csv.index = forecast_dates_csv
            df_csv.drop(['ts'], axis=1, inplace=True)
        else:
            load_csv_file_path = self.root + csv_path
            df_csv = pd.read_csv(load_csv_file_path, header=None, names=['ts', 'yhat'])
            df_csv.index = forecast_dates_csv
            df_csv.drop(['ts'], axis=1, inplace=True)
        forecast_out = pd.DataFrame()
        for day in days_list:
            # First and last row positions of df_final belonging to this day.
            first_elm_index = [i for i, x in enumerate(df_final.index.day == day) if x][0]
            last_elm_index = [i for i, x in enumerate(df_final.index.day == day) if x][-1]
            fcst_index = pd.date_range(start=df_final.index[first_elm_index],
                                       end=df_final.index[last_elm_index],
                                       freq=df_final.index.freq)
            # Clock-time window of that day, used to slice the 24h CSV data.
            first_hour = str(df_final.index[first_elm_index].hour)+":"+str(df_final.index[first_elm_index].minute)
            last_hour = str(df_final.index[last_elm_index].hour)+":"+str(df_final.index[last_elm_index].minute)
            if len(forecast_out) == 0:
                forecast_out = pd.DataFrame(
                    df_csv.between_time(first_hour, last_hour).values,
                    index=fcst_index)
            else:
                forecast_tp = pd.DataFrame(
                    df_csv.between_time(first_hour, last_hour).values,
                    index=fcst_index)
                forecast_out = pd.concat([forecast_out, forecast_tp], axis=0)
        return forecast_out
    def get_load_forecast(self, days_min_load_forecast: Optional[int] = 3, method: Optional[str] = 'naive',
                          csv_path: Optional[str] = "/data/data_load_forecast.csv",
                          set_mix_forecast:Optional[bool] = False, df_now:Optional[pd.DataFrame] = pd.DataFrame()) -> pd.Series:
        """
        Get and generate the load forecast data.

        :param days_min_load_forecast: The number of last days to retrieve that \
            will be used to generate a naive forecast, defaults to 3
        :type days_min_load_forecast: int, optional
        :param method: The method to be used to generate load forecast, the options \
            are 'csv' to load a CSV file or 'naive' for a persistence model, defaults to 'naive'
        :type method: str, optional
        :param csv_path: The path to the CSV file used when method = 'csv', \
            defaults to "/data/data_load_forecast.csv"
        :type csv_path: str, optional
        :param set_mix_forecast: Use a mixed forecast strategy to integrate now/current values.
        :type set_mix_forecast: Bool, optional
        :param df_now: The DataFrame containing the now/current data.
        :type df_now: pd.DataFrame, optional
        :return: The DataFrame containing the electrical load power in Watts
        :rtype: pd.DataFrame
        """
        if method == 'naive': # using a naive approach
            self.logger.info("Retrieving data from hass for load forecast using method = "+method)
            var_list = [self.var_load]
            var_replace_zero = None
            var_interp = [self.var_load]
            time_zone_load_foreacast = None
            # We will need to retrieve a new set of load data according to the days_min_load_forecast parameter
            rh = retrieve_hass(self.retrieve_hass_conf['hass_url'], self.retrieve_hass_conf['long_lived_token'],
                               self.freq, time_zone_load_foreacast, self.params, self.root, self.logger)
            if self.get_data_from_file:
                # Test mode: past data comes from a pickled fixture.
                with open(pathlib.Path(self.root+'/data/test_df_final.pkl'), 'rb') as inp:
                    rh.df_final, days_list, _ = pickle.load(inp)
            else:
                days_list = get_days_list(days_min_load_forecast)
                rh.get_data(days_list, var_list)
            rh.prepare_data(self.retrieve_hass_conf['var_load'], load_negative = self.retrieve_hass_conf['load_negative'],
                            set_zero_min = self.retrieve_hass_conf['set_zero_min'],
                            var_replace_zero = var_replace_zero,
                            var_interp = var_interp)
            df = rh.df_final.copy()[[self.var_load_new]]
            # Persistence: reuse the observations of the last delta_forecast
            # period as the forecast for the coming one.
            mask_forecast_out = (df.index > days_list[-1] - self.optim_conf['delta_forecast'])
            forecast_out = df.copy().loc[mask_forecast_out]
            forecast_out = forecast_out.rename(columns={self.var_load_new: 'yhat'})
            # Force forecast_out length to avoid mismatches
            forecast_out = forecast_out.iloc[0:len(self.forecast_dates)]
            forecast_out.index = self.forecast_dates
        elif method == 'csv': # reading from a csv file
            forecast_dates_csv = self.get_forecast_days_csv()
            load_csv_file_path = self.root + csv_path
            df_csv = pd.read_csv(load_csv_file_path, header=None, names=['ts', 'yhat'])
            # NOTE(review): all three branches below perform the identical
            # concatenation — the condition is dead code.
            if self.params is not None:
                if 'prediction_horizon' not in list(self.params['passed_data'].keys()):
                    df_csv = pd.concat([df_csv, df_csv], axis=0)
                else:
                    if self.params['passed_data']['prediction_horizon'] is None:
                        df_csv = pd.concat([df_csv, df_csv], axis=0)
                    else:
                        df_csv = pd.concat([df_csv, df_csv], axis=0)
            if len(df_csv) < len(forecast_dates_csv):
                self.logger.error("Passed data from CSV is not long enough")
            else:
                df_csv.index = forecast_dates_csv
                df_csv.drop(['ts'], axis=1, inplace=True)
                forecast_out = df_csv.copy().loc[self.forecast_dates]
        elif method == 'list': # reading a list of values
            forecast_dates_csv = self.get_forecast_days_csv()
            # Loading data from passed list
            data_list = self.params['passed_data']['load_power_forecast']
            if 'prediction_horizon' not in list(self.params['passed_data'].keys()):
                data_list = data_list + data_list
            else:
                if self.params['passed_data']['prediction_horizon'] is None:
                    data_list = data_list + data_list
                else:
                    data_list = data_list[0:self.params['passed_data']['prediction_horizon']]
            # Check if the passed data has the correct length
            # NOTE(review): direct key access here raises KeyError when
            # 'prediction_horizon' is absent — TODO confirm intended.
            if len(data_list) < len(forecast_dates_csv) and self.params['passed_data']['prediction_horizon'] is None:
                self.logger.error("Passed data from passed list is not long enough")
            else:
                # Define index and pick correct dates
                data_dict = {'ts':forecast_dates_csv, 'yhat':data_list}
                data = pd.DataFrame.from_dict(data_dict)
                data.index = forecast_dates_csv
                data.drop(['ts'], axis=1, inplace=True)
                forecast_out = data.copy().loc[self.forecast_dates]
        else:
            # NOTE(review): with an invalid method, forecast_out is unbound
            # and the line below raises NameError.
            self.logger.error("Passed method is not valid")
        P_Load_forecast = copy.deepcopy(forecast_out['yhat'])
        if set_mix_forecast:
            P_Load_forecast = forecast.get_mix_forecast(
                df_now, P_Load_forecast,
                self.params['passed_data']['alpha'], self.params['passed_data']['beta'], self.var_load_new)
        return P_Load_forecast
    def get_load_cost_forecast(self, df_final: pd.DataFrame, method: Optional[str] = 'hp_hc_periods',
                               csv_path: Optional[str] = "/data/data_load_cost_forecast.csv") -> pd.DataFrame:
        """
        Get the unit cost for the load consumption based on multiple tariff \
        periods. This is the cost of the energy from the utility in a vector \
        sampled at the fixed freq value.

        :param df_final: The DataFrame containing the input data.
        :type df_final: pd.DataFrame
        :param method: The method to be used to generate load cost forecast, \
            the options are 'hp_hc_periods' for peak and non-peak hours contracts, \
            'csv' to load a CSV file and 'list' for passed values, defaults to 'hp_hc_periods'
        :type method: str, optional
        :param csv_path: The path to the CSV file used when method = 'csv', \
            defaults to "/data/data_load_cost_forecast.csv"
        :type csv_path: str, optional
        :return: The input DataFrame with one additional column appended containing
            the load cost for each time observation.
        :rtype: pd.DataFrame
        """
        if method == 'hp_hc_periods':
            # Start with the off-peak price everywhere, then overwrite the
            # configured peak-hour windows with the peak price.
            df_final[self.var_load_cost] = self.optim_conf['load_cost_hc']
            list_df_hp = []
            for key, period_hp in self.optim_conf['list_hp_periods'].items():
                # Each period is a two-element list: [{'start': ...}, {'end': ...}].
                list_df_hp.append(df_final[self.var_load_cost].between_time(
                    period_hp[0]['start'], period_hp[1]['end']))
            for df_hp in list_df_hp:
                df_final.loc[df_hp.index, self.var_load_cost] = self.optim_conf['load_cost_hp']
        elif method == 'csv':
            forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0)
            forecast_out = self.get_forecast_out_from_csv(df_final,
                                                          forecast_dates_csv,
                                                          csv_path)
            df_final[self.var_load_cost] = forecast_out
        elif method == 'list': # reading a list of values
            forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0)
            data_list = self.params['passed_data']['load_cost_forecast']
            if 'prediction_horizon' in list(self.params['passed_data'].keys()):
                if self.params['passed_data']['prediction_horizon'] is not None:
                    data_list = data_list[0:self.params['passed_data']['prediction_horizon']]
            # NOTE(review): direct key access here raises KeyError when
            # 'prediction_horizon' is absent — TODO confirm intended.
            if len(data_list) < len(forecast_dates_csv) and self.params['passed_data']['prediction_horizon'] is None:
                self.logger.error("Passed data from passed list is not long enough")
            else:
                forecast_out = self.get_forecast_out_from_csv(df_final,
                                                              forecast_dates_csv,
                                                              None,
                                                              data_list=data_list)
                df_final[self.var_load_cost] = forecast_out
        else:
            self.logger.error("Passed method is not valid")
        return df_final
    def get_prod_price_forecast(self, df_final: pd.DataFrame, method: Optional[str] = 'constant',
                                csv_path: Optional[str] = "/data/data_prod_price_forecast.csv") -> pd.DataFrame:
        """
        Get the unit power production price for the energy injected to the grid.\
        This is the price of the energy injected to the utility in a vector \
        sampled at the fixed freq value.

        :param df_final: The DataFrame containing the input data.
        :type df_final: pd.DataFrame
        :param method: The method to be used to generate the production price forecast, \
            the options are 'constant' for a fixed constant value, 'csv' \
            to load a CSV file and 'list' for passed values, defaults to 'constant'
        :type method: str, optional
        :param csv_path: The path to the CSV file used when method = 'csv', \
            defaults to "/data/data_prod_price_forecast.csv"
        :type csv_path: str, optional
        :return: The input DataFrame with one additional column appended containing
            the power production price for each time observation.
        :rtype: pd.DataFrame
        """
        if method == 'constant':
            df_final[self.var_prod_price] = self.optim_conf['prod_sell_price']
        elif method == 'csv':
            forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0)
            forecast_out = self.get_forecast_out_from_csv(df_final,
                                                          forecast_dates_csv,
                                                          csv_path)
            df_final[self.var_prod_price] = forecast_out
        elif method == 'list': # reading a list of values
            forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0)
            data_list = self.params['passed_data']['prod_price_forecast']
            if 'prediction_horizon' in list(self.params['passed_data'].keys()):
                if self.params['passed_data']['prediction_horizon'] is not None:
                    data_list = data_list[0:self.params['passed_data']['prediction_horizon']]
            # NOTE(review): direct key access here raises KeyError when
            # 'prediction_horizon' is absent — TODO confirm intended.
            if len(data_list) < len(forecast_dates_csv) and self.params['passed_data']['prediction_horizon'] is None:
                self.logger.error("Passed data from passed list is not long enough")
            else:
                forecast_out = self.get_forecast_out_from_csv(df_final,
                                                              forecast_dates_csv,
                                                              None,
                                                              data_list=data_list)
                df_final[self.var_prod_price] = forecast_out
        else:
            self.logger.error("Passed method is not valid")
        return df_final
| 54.571207
| 128
| 0.610586
|
4a087d373038f9f9e55e0cbb1f9315061819cc8b
| 1,194
|
py
|
Python
|
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py
|
nicholsn/nipype
|
6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3
|
[
"BSD-3-Clause"
] | 1
|
2018-04-18T12:13:37.000Z
|
2018-04-18T12:13:37.000Z
|
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T14:31:47.000Z
|
2021-09-08T14:31:47.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.filtering.morphology import GrayscaleGrindPeakImageFilter
def test_GrayscaleGrindPeakImageFilter_inputs():
    # AUTO-GENERATED (checkspecs.py): verifies the interface's input trait
    # metadata against the expected map; yields one check per trait/metakey.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputVolume=dict(argstr='%s',
    position=-2,
    ),
    outputVolume=dict(argstr='%s',
    hash_files=False,
    position=-1,
    ),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    )
    inputs = GrayscaleGrindPeakImageFilter.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_GrayscaleGrindPeakImageFilter_outputs():
    """Yield one check per metadata key on each output trait of the filter."""
    expected_metadata = dict(
        outputVolume=dict(position=-1),
    )
    traits = GrayscaleGrindPeakImageFilter.output_spec().traits()
    for trait_name, metadata in expected_metadata.items():
        for meta_key, expected in metadata.items():
            yield assert_equal, getattr(traits[trait_name], meta_key), expected
| 29.121951
| 87
| 0.69263
|
4a087da175ee81a4929aec23302718b03826c2fb
| 9,195
|
py
|
Python
|
drone_awe/app_old.py
|
rymanderson/Drone-Models
|
396ed030f277a96365c7cbfaffb3d2006e5b12a8
|
[
"MIT"
] | 2
|
2019-12-01T10:27:54.000Z
|
2019-12-01T10:28:07.000Z
|
drone_awe/app_old.py
|
rymanderson/drone_awe
|
396ed030f277a96365c7cbfaffb3d2006e5b12a8
|
[
"MIT"
] | null | null | null |
drone_awe/app_old.py
|
rymanderson/drone_awe
|
396ed030f277a96365c7cbfaffb3d2006e5b12a8
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import datetime
import csv
import os
import classes
import functions as fun
# --- Load simulation settings; a validation run swaps in a case-specific config. ---
simulationparams = fun.getParams('Simulation','settings_list.txt','settings.txt'," ")
validation = None
if simulationparams['validation'] == True: #validation == True
    validation = True
    validationcase = simulationparams['validationcase']
    simulationparams = fun.getParams('Validation/' + validationcase,'settings_list.txt','settings.txt'," ","params/Simulation") #specifies settings_list is in separate path
else:
    validation = False
xlabel = simulationparams['xlabel']
# ensure xlabel is an independent variable
independentvariables = [
    "startstateofcharge",
    "altitude",
    "temperature",
    "dropsize",
    "liquidwatercontent",
    "newtemperature",
    "windspeed",
    "winddirection",
    "relativehumidity",
    "payload",
    "missionspeed",
    "model"
]
if xlabel not in independentvariables:
    raise(Exception("~~~~~ ERROR: desired x variable is not independent ~~~~~"))
ylabel = simulationparams['ylabel']
weatherlist = []
#instantiate drone
if simulationparams['drone'] == True:
    conversions = fun.getParams('Drone','paramlist.param','conversions.param'," ")
    dronename = simulationparams['dronename']
    if validation:
        # validation cases carry their own drone parameter file
        droneparams = fun.getParams('Validation/' + validationcase,'paramlist.param',dronename + ".param"," ","params/Drone")
    else:
        droneparams = fun.getParams('Drone','paramlist.param',dronename + '.param',' ')
    droneconversions = fun.getParams('Drone','paramlist.param','conversions.param',' ')
    drone = classes.Drone(dronename,droneparams,droneconversions)
else:
    raise Exception('Must specify drone name')
# instantiate battery
stateofhealth = simulationparams['stateofhealth']
startstateofcharge = simulationparams['startstateofcharge']
batterytechnology = simulationparams['batterytechnology']
battery = classes.Battery(drone,stateofhealth,startstateofcharge, batterytechnology)
# instantiate mission
missionparams = fun.getParams('Mission','list.mission','simple.mission'," ")
mission = classes.Mission(missionparams)
# Temperature: when sweeping temperature on the x axis, start at xbegin.
if xlabel == 'temperature':
    newtemperature = simulationparams['xbegin']
else:
    newtemperature = simulationparams['temperature']
temperatureparams = {'temperature':newtemperature} # NOTE(review): old "Ampere-hours" comment looked wrong — this is a temperature, units unclear; confirm
temperature = classes.Temperature(temperatureparams)
weatherlist.append(temperature)
# Humidity
# Bug fix: the sweep variable is named 'relativehumidity' (see the
# independentvariables whitelist), so the old check for 'humidity' could
# never match and the configured sweep start value was ignored.
if xlabel == 'relativehumidity':
    relativehumidity = simulationparams['xbegin']
else:
    relativehumidity = simulationparams['relativehumidity']
humidityparams = {'relativehumidity':relativehumidity}
humidity = classes.Humidity(humidityparams)
weatherlist.append(humidity)
# Rain (not yet implemented)
# dropsize = simulationparams['dropsize']
# liquidwatercontent = simulationparams['liquidwatercontent']
# rain = classes.Rain(dropsize,liquidwatercontent)
# weatherlist.append('rain')
# Wind (not yet implemented)
# speed = simulationparams['windspeed']
# direction = simulationparams['winddirection']
# wind = classes.Wind(speed,direction)
# weatherlist.append(wind)
# Icing (not yet implemented)
# weatherlist.append('icing')
# icing = classes.Icing()
# print("Weather parameters are: ")
# print(str(weatherlist)[1:-1])
# weatherparams = []
# for weathertype in weatherlist:
#     weatherparams = weatherparams + weathertype.params
# Aggregate weather model; index 0 is temperature, index 1 is humidity
# (the sweep loops below rely on this ordering).
weather = classes.Weather(simulationparams['altitude'],weatherlist)
print("Preparing to update weather:")
weather.update()
print("Weather updated.")
power = classes.Power(drone,weather,mission)
#simulation variables
timestep = simulationparams['timestep'] # more relevant later
simulationtype = simulationparams['simulationtype']
desiredresult = simulationparams['ylabel']
xbegin = simulationparams['xbegin']
xend = simulationparams['xend']
numsimulations = simulationparams['xnumber']
simulation = classes.Simulation(timestep,simulationtype)#,desiredresult)
# x sweep grid and result accumulators (yplot gets one y-series per weather value)
x = np.linspace(xbegin,xend,numsimulations)
y = []
xplot = x
yplot = []
if "weathereffect" in simulationparams:
    # optional second ("z") sweep over a weather quantity
    weathereffect = simulationparams["weathereffect"]
    weatherbegin = simulationparams["weatherbegin"]
    weatherend = simulationparams["weatherend"]
    weathernumber = int(simulationparams["weathernumber"])
    wvector = np.linspace(weatherbegin,weatherend,weathernumber)
else:
    weathernumber = int(1)
    wvector = range(weathernumber) # only iterate once
# Outer loop: optional weather ("z") sweep; inner loop: x-variable sweep.
for zvalue in wvector:
    if "weathereffect" in simulationparams:
        if weathereffect == 'temperature':
            print("weathereffect = temperature confirmed")
            weather.weatherlist[0].params["temperature"] = zvalue
        elif weathereffect == 'relativehumidity':
            # Bug fix: the key was misspelled "relativehummidity", so the
            # humidity z-sweep silently never changed the weather model.
            weather.weatherlist[1].params["relativehumidity"] = zvalue
        else:
            raise(Exception("ERROR: weathereffect not a valid input"))
        weather.update()
        power.update(drone,weather,mission)
        battery.update()
    for xvalue in x:
        # write xvalue into whichever model owns the x variable, then refresh
        if xlabel in drone.params:
            drone.params[xlabel] = xvalue
            power.update(drone,weather,mission)
            battery.update()
        elif xlabel in weather.params:
            if xlabel == 'temperature':
                weather.weatherlist[0].params[xlabel] = xvalue
            elif xlabel == 'relativehumidity':
                weather.weatherlist[1].params[xlabel] = xvalue
            weather.update()
            power.update(drone,weather,mission)
            battery.update()
        elif xlabel in mission.params:
            mission.params[xlabel] = xvalue
            power.update(drone,weather,mission)
            battery.update()
        elif xlabel in simulationparams:
            simulationparams[xlabel] = xvalue
            power.update(drone,weather,mission)
            battery.update()
        else:
            raise(Exception("~~~~~ ERROR: desired x variable not set ~~~~~"))
        simulation.run(drone,battery,power,weather,mission)
        # read the requested y value back out of whichever model owns it
        if ylabel in drone.params:
            y.append(drone.params[ylabel])
        elif ylabel in simulation.params:
            y.append(simulation.params[ylabel])
        elif ylabel in weather.params:
            y.append(weather.params[ylabel])
        elif ylabel in mission.params:
            y.append(mission.params[ylabel])
        elif ylabel in power.params:
            y.append(power.params[ylabel])
        elif ylabel in simulationparams:
            y.append(simulationparams[ylabel])
        else:
            raise(Exception("~~~~~ ERROR: desired y variable not found ~~~~~"))
    # one finished series per weather value
    yplot.append(y)
    y = []
# --- Report results and plot (optionally overlaying validation data) ---
print("x data includes: ",xplot)
print("y data includes: ",yplot)
print("")
print("EXE.py: Independent variable is ",xlabel)
print("EXE.py: Desired Result is ",desiredresult)
if "weathereffect" in simulationparams:
    print("EXE.py: Z iterator is ",simulationparams['weathereffect'])
if not validation: #proceed with normal plot
    if simulationparams['plot'] == True:
        xlabel = simulationparams['xlabel']
        ylabel = desiredresult
        axistitle = simulationparams['title']
        plotter = classes.Plotter(xplot,xlabel,yplot,ylabel,axistitle,weathernumber)
        plotter.plot_line()
    else:
        print('No plot functionality has been defined.')
else: # Plot validation data on top of our model
    xvalid,yvalid = fun.getXandY(validationcase,",")
    # yvalid = [x * 60.0 for x in yvalid] #only for converting from minutes to seconds until we get the conversion working before plotting
    if simulationparams['plot'] == True:
        xlabel = simulationparams['xlabel']
        ylabel = desiredresult
        axistitle = simulationparams['title'] + " Validation"
        plotter = classes.Plotter(xplot,xlabel,yplot,ylabel,axistitle,weathernumber)
        plotter.plot_validation(xvalid,yvalid)
    else:
        print('No plot functionality has been defined.')
| 36.927711
| 172
| 0.643719
|
4a087dcfd10562a1dd9b6588c16a49d42ba10c63
| 4,217
|
py
|
Python
|
FlABTSpec.py
|
WilliamWickerson/Florida-ABT-FLAT-Spec
|
a1f2e92b5aaa61692b418f684dd0ef479054b851
|
[
"MIT"
] | null | null | null |
FlABTSpec.py
|
WilliamWickerson/Florida-ABT-FLAT-Spec
|
a1f2e92b5aaa61692b418f684dd0ef479054b851
|
[
"MIT"
] | null | null | null |
FlABTSpec.py
|
WilliamWickerson/Florida-ABT-FLAT-Spec
|
a1f2e92b5aaa61692b418f684dd0ef479054b851
|
[
"MIT"
] | null | null | null |
# Product-category codes used throughout the Florida ABT flat-file records.
# Keys are the two-digit type codes embedded in each 18-character sales block.
_types = {
    1 : "Beer",
    2 : "Wine",
    3 : "Spirits/Liquor",
    4 : "Cigars",
    5 : "Cigarette",
    6 : "Other Tobacco",
    7 : "Other",
    8 : "Total",
}
#Pads integer with 0s right justified
def _zeroPad(x, size):
string = str(x)
if len(string) < size:
padding = "0" * (size - len(string))
string = padding + string
return string
#Pads string with spaces left justified
def _spacePadLeft(string, size):
if len(string) > size:
string = string[0:size]
elif len(string) < size:
string += " " * (size - len(string))
return string
#Pads string with spaces right justified
def _spacePadRight(string, size):
if len(string) > size:
string = string[0:size]
elif len(string) < size:
string = (" " * (size - len(string))) + string
return string
#Gets the string for a single type's sales
def _salesBlock(itemType, amount):
    """Format one 18-char sales block: 2-digit type, '0000', 11-digit amount, sign."""
    sign = "+" if amount >= 0 else "-"
    return "{}0000{}{}".format(_zeroPad(itemType, 2), _zeroPad(abs(amount), 11), sign)
#Wraps _salesBlock, looking up the amount for a type code in a dictionary
def _typeBlock(itemType, typeDic):
    """Format the sales block for *itemType*, using 0 when the category is absent."""
    amount = typeDic.get(_types[itemType], 0)
    return _salesBlock(itemType, amount)
#Returns a string for all 8 types
def _fullSales(typeDic):
    """Concatenate the sales blocks for all eight product types, in code order."""
    return "".join(_typeBlock(code, typeDic) for code in range(1, 9))
#Starts at BEV1100001 and returns next free BEV number
#Use this for any companies missing a license number
currLicenseNum = 1100001  # module-level counter; not thread-safe

def nextLicenseNum():
    """Return the next synthetic 'BEV#######' license number and advance the counter."""
    global currLicenseNum
    string = "BEV" + _zeroPad(currLicenseNum, 7)
    currLicenseNum += 1
    return string
#Returns a 29 byte string header
def header(FEIN, licenseNum):
    """Build the 29-byte Type 01 header from a 9-digit FEIN and a 10-char license number."""
    assert len(FEIN) == 9 and len(licenseNum) == 10
    return "".join(["01", "0000", FEIN, "0000", licenseNum])
#Returns a 188 byte string of the Type 80 record
#startDate and endDate are of the form MMDDYYYY
#typeDic is a dictionary like: { "Cigars" : 1234 }
#which would mean $12.34 in annual Cigar sales
#See the above _types dictionary
def sellerReport(licenseNum, startDate, endDate, typeDic):
    """Build the 188-byte Type 80 seller annual-sales record."""
    assert len(licenseNum) == 10 and len(startDate) == 8 and len(endDate) == 8
    fields = [
        "80", "0000",
        licenseNum, "0000",
        startDate, "0000",
        endDate, "0000",
        _fullSales(typeDic),
    ]
    return "".join(fields)
#Returns a 182 byte string of the Type 81 record
#The retail SUT and FEIN are optional, in which case
#Just replace them with the appropriate number of space characters
def retailerInfo(name, address, city, state, zipcode, retailLicenseNum, retailSUTNum, retailFEIN, sellerLicenseNum):
    """Build the 182-byte Type 81 retailer-identification record."""
    zipcode = _zeroPad(int(zipcode), 5)
    assert len(state) == 2 and len(zipcode) == 5 and len(retailSUTNum) == 13 and len(retailFEIN) == 9 and len(sellerLicenseNum) == 10
    fields = [
        "81", "0000",
        _spacePadLeft(name, 40), "0000",
        _spacePadLeft(address, 40),
        _spacePadLeft(city, 26),
        state,
        zipcode, "0000",
        _spacePadRight(retailLicenseNum, 11), "0000",
        retailSUTNum, "0000",
        retailFEIN, "0000",
        sellerLicenseNum,
    ]
    return "".join(fields)
#Returns a 183 byte string of the Type 82 record
#monthDate is of the form MMYY
#typeDic is of the same format as Type 80 record
def retailerMonth(monthDate, retailLicenseNum, sellerLicenseNum, typeDic):
    """Build the 183-byte Type 82 retailer monthly-sales record."""
    assert len(monthDate) == 4 and len(sellerLicenseNum) == 10
    fields = [
        "82", "0000",
        monthDate, "0000",
        _fullSales(typeDic),
        _spacePadRight(retailLicenseNum, 11), "0000",
        sellerLicenseNum,
    ]
    return "".join(fields)
#Returns a 59 byte string of the Type 99 trailer
#num81 and num82 are the number of Type 81 and
#Type 82 records respectively, the latter being
#twelve times the former if months aren't left out
def trailer(FEIN, licenseNum, num81, num82):
    """Build the 59-byte Type 99 trailer carrying record counts."""
    assert len(FEIN) == 9 and len(licenseNum) == 10
    fields = [
        "99", "0000",
        FEIN, "0000",
        licenseNum, "0000",
        _zeroPad(num81, 11), "0000",
        _zeroPad(num82, 11),
    ]
    return "".join(fields)
| 30.781022
| 133
| 0.657339
|
4a087f7596781574d1f00da64b7957b05edddd59
| 205
|
py
|
Python
|
django/mysite/tourit/admin.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | null | null | null |
django/mysite/tourit/admin.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | null | null | null |
django/mysite/tourit/admin.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | 1
|
2022-03-22T17:43:33.000Z
|
2022-03-22T17:43:33.000Z
|
from django.contrib import admin
# Register your models here.
from .models import *
# Register tourit models so they appear in the Django admin (same order as before).
for _model in (Customer, Place, PlaceType, Itinerary):
    admin.site.register(_model)
| 20.5
| 32
| 0.804878
|
4a087f79c3efa511ffe91fa2836451ba48eeabe6
| 471
|
py
|
Python
|
database/database.py
|
w16/backuper
|
9b943cdbce292e67bc57d1095660181a10206423
|
[
"MIT"
] | null | null | null |
database/database.py
|
w16/backuper
|
9b943cdbce292e67bc57d1095660181a10206423
|
[
"MIT"
] | null | null | null |
database/database.py
|
w16/backuper
|
9b943cdbce292e67bc57d1095660181a10206423
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from shell.system import SystemShell
class Database:
    """Abstract base for database back-ends that can dump themselves to a file."""
    # NOTE(review): __metaclass__ is Python-2 syntax and is inert on Python 3,
    # so abstract enforcement may not apply — confirm the target interpreter.
    __metaclass__ = ABCMeta

    def __init__(self, database, username, password, host, port, shell=SystemShell):
        """Store connection parameters and the shell used to run dump commands."""
        self.database = database
        self.username = username
        self.password = password
        self.host = host
        self.port = port
        # Bug fix: honour the injected shell; the old code always assigned the
        # SystemShell class, silently ignoring the `shell` parameter.
        self.shell = shell

    @abstractmethod
    def dump(self, output_file):
        """Write a dump of the database to *output_file* (must be overridden)."""
        raise NotImplementedError()
| 29.4375
| 84
| 0.692144
|
4a088020a1c0d0d7db30efa4b8bc16289e2d698f
| 12,408
|
py
|
Python
|
localstack/services/events/events_starter.py
|
rubencosta/localstack
|
369f65f1635c09934e23e3dbca54fbdb31cb7b74
|
[
"Apache-2.0"
] | 31,928
|
2017-07-04T03:06:28.000Z
|
2022-03-31T22:33:27.000Z
|
localstack/services/events/events_starter.py
|
rubencosta/localstack
|
369f65f1635c09934e23e3dbca54fbdb31cb7b74
|
[
"Apache-2.0"
] | 5,216
|
2017-07-04T11:45:41.000Z
|
2022-03-31T22:02:14.000Z
|
localstack/services/events/events_starter.py
|
lambdafunc/localstack
|
6285b43bec57435a2179310a8de2af8d8d8cf8dd
|
[
"Apache-2.0"
] | 3,056
|
2017-06-05T13:29:11.000Z
|
2022-03-31T20:54:43.000Z
|
import datetime
import ipaddress
import json
import logging
import re
import uuid
from typing import Any, Dict, List
from moto.events.models import Rule as rule_model
from moto.events.responses import EventsHandler as events_handler
from localstack import config
from localstack.constants import APPLICATION_AMZ_JSON_1_1, TEST_AWS_ACCOUNT_ID
from localstack.services.events.events_listener import DEFAULT_EVENT_BUS_NAME, _dump_events_to_files
from localstack.services.events.scheduler import JobScheduler
from localstack.services.infra import start_moto_server
from localstack.utils.aws import aws_stack
from localstack.utils.aws.message_forwarding import send_event_to_target
from localstack.utils.common import extract_jsonpath, short_uid, truncate
LOG = logging.getLogger(__name__)
CONTENT_BASE_FILTER_KEYWORDS = ["prefix", "anything-but", "numeric", "cidr", "exists"]
def filter_event_with_target_input_path(target: Dict, event: Dict) -> Dict:
    """Apply the target's optional JSONPath ``InputPath`` filter to *event*."""
    input_path = target.get("InputPath")
    if not input_path:
        return event
    return extract_jsonpath(event, input_path)
# TODO: unclear shared responsibility for filtering with filter_event_with_content_base_parameter
def handle_prefix_filtering(event_pattern, value):
    """Return True when *value* matches any element of an event-pattern list.

    Elements may be literals, {"prefix": ...}, {"anything-but": ...},
    {"numeric": [...]} dicts, or nested lists of allowed literals.
    """
    for element in event_pattern:
        if isinstance(element, (int, str)):
            if str(element) == str(value):
                return True
        elif isinstance(element, dict) and "prefix" in element:
            if value.startswith(element.get("prefix")):
                return True
        elif isinstance(element, dict) and "anything-but" in element:
            if element.get("anything-but") != value:
                return True
        elif "numeric" in element:
            return handle_numeric_conditions(element.get("numeric"), value)
        elif isinstance(element, list):
            # Bug fix: previous code tested `value in list` (the builtin type),
            # which raised TypeError instead of checking the nested list.
            if value in element:
                return True
    return False
def handle_numeric_conditions(conditions: List[Any], value: float):
    """Return True iff *value* satisfies every (operator, operand) pair in *conditions*."""
    index = 0
    while index < len(conditions):
        op = conditions[index]
        if op == "<" and not value < conditions[index + 1]:
            return False
        if op == ">" and not value > conditions[index + 1]:
            return False
        if op == "<=" and not value <= conditions[index + 1]:
            return False
        if op == ">=" and not value >= conditions[index + 1]:
            return False
        index += 2
    return True
# TODO: refactor/simplify
def filter_event_based_on_event_format(self, rule_name: str, event: Dict[str, Any]):
    """Return True when *event* matches the stored pattern of rule *rule_name*.

    ``self`` is the moto EventsHandler this function is patched onto; the rule
    pattern is fetched from ``self.events_backend``. A missing rule or a
    non-matching pattern yields False.
    """
    def filter_event(event_pattern_filter: Dict[str, Any], event: Dict[str, Any]):
        # Recursive match of one pattern dict against one event dict.
        for key, value in event_pattern_filter.items():
            # Pattern keys are CamelCase; formatted events use lowercase keys.
            event_value = event.get(key.lower())
            if event_value is None:
                return False
            if event_value and isinstance(event_value, dict):
                for key_a, value_a in event_value.items():
                    if key_a == "ip":
                        # TODO add IP-Address check here
                        continue
                    if isinstance(value.get(key_a), (int, str)):
                        if value_a != value.get(key_a):
                            return False
                    if isinstance(value.get(key_a), list) and value_a not in value.get(key_a):
                        if not handle_prefix_filtering(value.get(key_a), value_a):
                            return False
            elif isinstance(value, list) and not identify_content_base_parameter_in_pattern(value):
                # Plain list of allowed literals.
                if (
                    isinstance(event_value, list)
                    and get_two_lists_intersection(value, event_value) == []
                ):
                    return False
                elif (
                    not isinstance(event_value, list)
                    and isinstance(event_value, (str, int))
                    and event_value not in value
                ):
                    return False
            elif isinstance(value, list) and identify_content_base_parameter_in_pattern(value):
                # Content-based filter list (prefix/anything-but/numeric/cidr/exists).
                if not filter_event_with_content_base_parameter(value, event_value):
                    return False
            elif isinstance(value, (str, dict)):
                try:
                    # Nested patterns may arrive JSON-encoded; recurse after decoding.
                    value = json.loads(value) if isinstance(value, str) else value
                    if isinstance(value, dict) and not filter_event(value, event_value):
                        return False
                except json.decoder.JSONDecodeError:
                    return False
        return True

    rule_information = self.events_backend.describe_rule(rule_name)
    if not rule_information:
        LOG.info('Unable to find rule "%s" in backend: %s', rule_name, rule_information)
        return False
    if rule_information.event_pattern._pattern:
        event_pattern = rule_information.event_pattern._pattern
        if not filter_event(event_pattern, event):
            return False
    return True
def process_events(event: Dict, targets: List[Dict]):
    """Forward *event* to every target, applying each target's InputPath filter first.

    Delivery failures are logged and swallowed so one bad target does not
    block delivery to the remaining targets (deliberate best-effort).
    """
    for target in targets:
        arn = target["Arn"]
        changed_event = filter_event_with_target_input_path(target, event)
        try:
            send_event_to_target(arn, changed_event, aws_stack.get_events_target_attributes(target))
        except Exception as e:
            # Lazy %-style args: the message is only built if the line is emitted.
            LOG.info(
                "Unable to send event notification %s to target %s: %s",
                truncate(event), target, e,
            )
def apply_patches():
    """Monkey-patch moto's events backend with LocalStack-specific behavior."""
    # Fix events ARN: use the test account id instead of moto's default.
    def rule_model_generate_arn(self, name):
        return "arn:aws:events:{region_name}:{account_id}:rule/{name}".format(
            region_name=self.region_name, account_id=TEST_AWS_ACCOUNT_ID, name=name
        )

    # specific logic for put_events which forwards matching events to target listeners
    def events_handler_put_events(self):
        entries = self._get_param("Entries")
        # Wrap each entry with a generated event id.
        events = list(map(lambda event: {"event": event, "uuid": str(uuid.uuid4())}, entries))
        _dump_events_to_files(events)
        event_rules = self.events_backend.rules
        for event_envelope in events:
            event = event_envelope["event"]
            event_bus = event.get("EventBusName") or DEFAULT_EVENT_BUS_NAME
            matchine_rules = [r for r in event_rules.values() if r.event_bus_name == event_bus]
            if not matchine_rules:
                continue
            # Shape the entry into the canonical EventBridge envelope.
            formatted_event = {
                "version": "0",
                "id": event_envelope["uuid"],
                "detail-type": event.get("DetailType"),
                "source": event.get("Source"),
                "account": TEST_AWS_ACCOUNT_ID,
                "time": datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),
                "region": self.region,
                "resources": event.get("Resources", []),
                "detail": json.loads(event.get("Detail", "{}")),
            }
            targets = []
            for rule in matchine_rules:
                if filter_event_based_on_event_format(self, rule.name, formatted_event):
                    targets.extend(self.events_backend.list_targets_by_rule(rule.name)["Targets"])
            # process event
            process_events(formatted_event, targets)
        content = {
            "FailedEntryCount": 0,  # TODO: dynamically set proper value when refactoring
            "Entries": list(map(lambda event: {"EventId": event["uuid"]}, events)),
        }
        self.response_headers.update(
            {"Content-Type": APPLICATION_AMZ_JSON_1_1, "x-amzn-RequestId": short_uid()}
        )
        return json.dumps(content), self.response_headers

    rule_model._generate_arn = rule_model_generate_arn
    events_handler.put_events = events_handler_put_events
def start_scheduler():
    """Start the background job scheduler used for scheduled event rules."""
    JobScheduler.start()
def start_events(port=None, asynchronous=None, update_listener=None):
    """Apply patches, start the scheduler, then launch the moto Events backend."""
    listen_port = port or config.PORT_EVENTS
    apply_patches()
    start_scheduler()
    return start_moto_server(
        key="events",
        port=listen_port,
        name="Cloudwatch Events",
        asynchronous=asynchronous,
        update_listener=update_listener,
    )
# ---------------
# HELPER METHODS
# ---------------
def get_two_lists_intersection(lst1, lst2):
    """Return elements of lst1 also present in lst2 (order/duplicates of lst1 kept)."""
    common = []
    for candidate in lst1:
        if candidate in lst2:
            common.append(candidate)
    return common
def identify_content_base_parameter_in_pattern(parameters):
    """Return True if any dict entry's first key is a content-filter keyword (else None)."""
    for param in parameters:
        if isinstance(param, dict) and list(param)[0] in CONTENT_BASE_FILTER_KEYWORDS:
            return True
def filter_event_with_content_base_parameter(pattern_value, event_value):
    """Return True when *event_value* matches any content-based filter in *pattern_value*.

    Supports literal matches plus the {"prefix"}, {"exists"}, {"cidr"},
    {"numeric"} and {"anything-but"} single-key filter dicts.
    """
    for element in pattern_value:
        if (isinstance(element, (str, int))) and (event_value == element or element in event_value):
            return True
        elif isinstance(element, dict):
            # Each filter dict is expected to carry exactly one keyword key.
            element_key = list(element.keys())[0]
            element_value = element.get(element_key)
            if element_key.lower() == "prefix":
                if isinstance(event_value, str) and event_value.startswith(element_value):
                    return True
            elif element_key.lower() == "exists":
                if element_value and event_value:
                    return True
                elif not element_value and not event_value:
                    return True
            elif element_key.lower() == "cidr":
                # NOTE(review): materializes every address of the network — slow for
                # large CIDR blocks; confirm expected block sizes.
                ips = [str(ip) for ip in ipaddress.IPv4Network(element_value)]
                if event_value in ips:
                    return True
            elif element_key.lower() == "numeric":
                if check_valid_numeric_content_base_rule(element_value):
                    # Walk (operator, operand) pairs; break on first failed bound.
                    for index in range(len(element_value)):
                        if isinstance(element_value[index], int):
                            continue
                        if (
                            element_value[index] == ">"
                            and isinstance(element_value[index + 1], int)
                            and event_value <= element_value[index + 1]
                        ):
                            break
                        elif (
                            element_value[index] == ">="
                            and isinstance(element_value[index + 1], int)
                            and event_value < element_value[index + 1]
                        ):
                            break
                        elif (
                            element_value[index] == "<"
                            and isinstance(element_value[index + 1], int)
                            and event_value >= element_value[index + 1]
                        ):
                            break
                        elif (
                            element_value[index] == "<="
                            and isinstance(element_value[index + 1], int)
                            and event_value > element_value[index + 1]
                        ):
                            break
                    else:
                        # for-else: no bound failed, so the numeric filter matches.
                        return True
            elif element_key.lower() == "anything-but":
                if isinstance(element_value, list) and event_value not in element_value:
                    return True
                elif (isinstance(element_value, (str, int))) and event_value != element_value:
                    return True
                elif isinstance(element_value, dict):
                    nested_key = list(element_value)[0]
                    if nested_key == "prefix" and not re.match(
                        r"^{}".format(element_value.get(nested_key)), event_value
                    ):
                        return True
    return False
def check_valid_numeric_content_base_rule(list_of_operators):
    """Validate a numeric content-filter condition list of (operator, operand) pairs.

    Rejects lists longer than four entries, any use of "=", and ranges whose
    upper bound is below the lower bound.
    """
    if len(list_of_operators) > 4:
        return False
    if "=" in list_of_operators:
        return False
    if len(list_of_operators) > 2:
        upper_limit = None
        lower_limit = None
        for index in range(len(list_of_operators)):
            item = list_of_operators[index]
            # "<" matches both "<" and "<=" (substring check); same for ">".
            if not isinstance(item, int) and "<" in item:
                upper_limit = list_of_operators[index + 1]
            if not isinstance(item, int) and ">" in item:
                lower_limit = list_of_operators[index + 1]
            # Bug fix: compare with `is not None` so a bound of 0 is not
            # mistaken for "no bound given" (old truthiness check skipped it).
            if upper_limit is not None and lower_limit is not None and upper_limit < lower_limit:
                return False
    return True
| 39.390476
| 100
| 0.5843
|
4a08812c20a6898b5e24011a664faa20a7814b5d
| 1,663
|
py
|
Python
|
zerver/lib/stream_traffic.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:55.000Z
|
2022-02-06T13:00:21.000Z
|
zerver/lib/stream_traffic.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/stream_traffic.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | 1
|
2022-01-15T08:36:09.000Z
|
2022-01-15T08:36:09.000Z
|
import datetime
from typing import Dict, Optional, Set
from django.db.models import Sum
from django.utils.timezone import now as timezone_now
from analytics.lib.counts import COUNT_STATS
from analytics.models import StreamCount
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    """Return 28-day human message counts for the given streams, keyed by stream id."""
    stat = COUNT_STATS["messages_in_stream:is_bot:day"]
    traffic_from = timezone_now() - datetime.timedelta(days=28)
    rows = (
        StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from)
        .filter(stream_id__in=stream_ids)
        .values("stream_id")
        .annotate(value=Sum("value"))
    )
    return {row["stream_id"]: row["value"] for row in rows}
def round_to_2_significant_digits(number: int) -> int:
    """Round a non-negative int to two significant digits."""
    ndigits = 2 - len(str(number))
    return int(round(number, ndigits))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(
    stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]
) -> Optional[int]:
    """Estimate a stream's weekly message rate, or None when it is too young.

    Streams at least 28 days old use the full four-week window; younger streams
    (down to STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS) extrapolate from their age.
    """
    stream_traffic = recent_traffic.get(stream_id, 0)
    stream_age = (timezone_now() - stream_date_created).days
    if stream_age >= 28:
        average_weekly_traffic = int(stream_traffic // 4)
    elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        average_weekly_traffic = int(stream_traffic * 7 // stream_age)
    else:
        return None
    # Never report a live stream as exactly zero traffic.
    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1
    return round_to_2_significant_digits(average_weekly_traffic)
| 30.796296
| 90
| 0.74083
|
4a08822d54ead026a36bbbfeed9eb2f548a35a40
| 7,237
|
py
|
Python
|
test/probe/test_empty_device_handoff.py
|
aristanetworks/swift
|
9fe774840e75cc54f2e0302e1e4501807fdb8b3c
|
[
"Apache-2.0"
] | null | null | null |
test/probe/test_empty_device_handoff.py
|
aristanetworks/swift
|
9fe774840e75cc54f2e0302e1e4501807fdb8b3c
|
[
"Apache-2.0"
] | null | null | null |
test/probe/test_empty_device_handoff.py
|
aristanetworks/swift
|
9fe774840e75cc54f2e0302e1e4501807fdb8b3c
|
[
"Apache-2.0"
] | 1
|
2020-06-05T08:07:09.000Z
|
2020-06-05T08:07:09.000Z
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from subprocess import call
from unittest import main, TestCase
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from test.probe.common import kill_server, kill_servers, reset_environment,\
start_server
from swift.common.utils import readconf
class TestEmptyDevice(TestCase):
    """Probe test: an object primary whose device is wiped must recover
    the object from a handoff node via replication."""

    def setUp(self):
        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.url, self.token,
         self.account, self.configs) = reset_environment()

    def tearDown(self):
        kill_servers(self.port2server, self.pids)

    def _get_objects_dir(self, onode):
        """Return the on-disk device directory for the given object node."""
        device = onode['device']
        # NOTE(review): Python-2 integer division assumed; under Python 3 this
        # yields a float and breaks the config-path formatting — confirm.
        node_id = (onode['port'] - 6000) / 10
        obj_server_conf = readconf(self.configs['object'] % node_id)
        devices = obj_server_conf['app:object-server']['devices']
        obj_dir = '%s/%s' % (devices, device)
        return obj_dir

    def test_main(self):
        # Create container
        # Kill one container/obj primary server
        # Delete the "objects" directory on the primary server
        # Create container/obj (goes to two primary servers and one handoff)
        # Kill other two container/obj primary servers
        # Indirectly through proxy assert we can get container/obj
        # Restart those other two container/obj primary servers
        # Directly to handoff server assert we can get container/obj
        # Assert container listing (via proxy and directly) has container/obj
        # Bring the first container/obj primary server back up
        # Assert that it doesn't have container/obj yet
        # Run object replication for first container/obj primary server
        # Run object replication for handoff node
        # Assert the first container/obj primary server now has container/obj
        # Assert the handoff server no longer has container/obj
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server(onode['port'], self.port2server, self.pids)
        obj_dir = '%s/objects' % self._get_objects_dir(onode)
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        # We've indirectly verified the handoff node has the object, but
        # let's directly verify it.
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        timeout = time.time() + 5
        found_objs_on_cnode = []
        while time.time() < timeout:
            # Bug fix: the comprehension filtered on `cnodes not in ...`
            # (whole-list membership, always true), so already-confirmed
            # container nodes were re-polled on every pass.
            for cnode in [c for c in cnodes if c not in
                          found_objs_on_cnode]:
                objs = [o['name'] for o in
                        direct_client.direct_get_container(
                            cnode, cpart, self.account, container)[1]]
                if obj in objs:
                    found_objs_on_cnode.append(cnode)
            if len(found_objs_on_cnode) >= len(cnodes):
                break
            time.sleep(0.3)
        if len(found_objs_on_cnode) < len(cnodes):
            missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
                       cnodes if cnode not in found_objs_on_cnode]
            raise Exception('Container servers %r did not know about object' %
                            missing)
        start_server(onode['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        exc = None
        try:
            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEquals(exc.http_status, 404)
        self.assertFalse(os.path.exists(obj_dir))
        # Some ring layouts use a dedicated replication port; fall back to
        # the service port when it is absent.
        try:
            port_num = onode['replication_port']
        except KeyError:
            port_num = onode['port']
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        call(['swift-object-replicator',
              self.configs['object-replicator'] %
              ((port_num - 6000) / 10), 'once'])
        call(['swift-object-replicator',
              self.configs['object-replicator'] %
              ((another_port_num - 6000) / 10), 'once'])
        odata = direct_client.direct_get_object(onode, opart, self.account,
                                                container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = None
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEquals(exc.http_status, 404)


if __name__ == '__main__':
    main()
| 43.335329
| 79
| 0.613928
|
4a08827602487b6fa2a5df2c81c4f17c04faa860
| 5,548
|
py
|
Python
|
oneshot/alfassy/img_to_vec.py
|
nganltp/admicro-LaSO
|
857d67a40af437ab57068fb0de35e4ada56c6209
|
[
"BSD-3-Clause"
] | 83
|
2019-04-14T06:58:15.000Z
|
2022-03-01T01:34:03.000Z
|
oneshot/alfassy/img_to_vec.py
|
leokarlin/LaSO
|
8941bdc9316361ad03dbc2bcabd4bf9922c0ecc7
|
[
"BSD-3-Clause"
] | 17
|
2019-04-28T04:26:24.000Z
|
2022-01-19T15:37:42.000Z
|
oneshot/alfassy/img_to_vec.py
|
nganltp/admicro-LaSO
|
857d67a40af437ab57068fb0de35e4ada56c6209
|
[
"BSD-3-Clause"
] | 15
|
2019-09-05T04:22:10.000Z
|
2022-01-13T15:31:25.000Z
|
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
import os
import random
import torch.nn.functional as F
use_cuda = True if torch.cuda.is_available() else False
random.seed(5)
torch.manual_seed(5)
if use_cuda:
torch.cuda.manual_seed_all(5)
class Img2OurVec():
    """Extract feature embeddings from images with a fine-tuned CNN backbone.

    Wraps a classification network (Inception-v3, ResNet-18, or a custom
    checkpointed model) and exposes :meth:`get_vec`, which captures the
    activations of an intermediate layer via a forward hook.
    """
    #def __init__(self, model='inception_v3', layer='default', layer_output_size=2048):
    def __init__(self, model='inception', layer='default', layer_output_size=2048, data="top10", transform=None):
        """ Img2Vec
        :param model: String name of requested model ('inception', 'resnet18' or 'ourT10Class')
        :param layer: String or Int depending on model. See more docs: https://github.com/christiansafka/img2vec.git
        :param layer_output_size: Int depicting the output size of the requested layer
        :param data: "full" selects a 200-way classifier head, anything else an 80-way head
        :param transform: optional torchvision transform applied to each input image
        """
        cuda = True if torch.cuda.is_available() else False
        self.device = torch.device("cuda" if cuda else "cpu")
        self.layer_output_size = layer_output_size
        # Hard-coded checkpoint path on the author's cluster; earlier runs kept
        # for reference. NOTE(review): not portable — breaks off this machine.
        # self.model_path = '/dccstor/alfassy/saved_models/inception_traincocoInceptionT10Half2018.9.1.9:30epoch:71'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.3.13:39best'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.8.12:46best'
        self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf642018.10.9.13:44epoch:30'
        self.model, self.extraction_layer = self._get_model_and_layer(model, layer, data)
        self.model = self.model.to(self.device)
        self.model.eval()
        #self.scaler = transforms.Resize(224, 224)
        #self.scaler = transforms.Scale((224, 224))
        self.transform = transform
        self.model_name = model
    def get_vec(self, image, tensor=True):
        """ Get vector embedding from PIL image
        :param image: PIL Image (converted to a 1xCxHxW tensor when a transform was supplied;
            otherwise assumed to already be a batched tensor — TODO confirm with callers)
        :param tensor: If True, get_vec will return a FloatTensor instead of Numpy array
        :returns: torch.Tensor, or numpy ndarray slice when tensor=False
        """
        if self.transform is not None:
            image = self.transform(image).unsqueeze(0).to(self.device)
        batch_size = image.shape[0]
        # print(image.shape)
        # Inception's Mixed_7c output is (B, 2048, 8, 8); the other backbones
        # hook a layer with a (B, C, 1, 1) output.
        if self.model_name == "inception":
            my_embedding = torch.zeros(batch_size, self.layer_output_size, 8, 8).to(self.device)
        else:
            my_embedding = torch.zeros(batch_size, self.layer_output_size, 1, 1).to(self.device)
        def copy_data_resnet(m, i, o):
            # Forward hook: capture the layer *output*.
            my_embedding.copy_(o.data)
        def copy_data_inception(m, i, o):
            # NOTE(review): reads ``i.data`` but forward-hook inputs are a
            # tuple, and this function is never registered below — likely dead
            # or leftover code; confirm before relying on it.
            my_embedding.copy_(i.data)
        if self.model_name == "inception":
            h = self.extraction_layer.register_forward_hook(copy_data_resnet)
        else:
            # NOTE(review): both branches register the same hook, making this
            # if/else redundant — confirm whether copy_data_inception was meant.
            h = self.extraction_layer.register_forward_hook(copy_data_resnet)
        h_x = self.model(image)
        h.remove()
        # print(my_embedding.shape)
        # Collapse the spatial grid to a single vector per channel.
        # NOTE(review): kernel_size=8 matches the inception 8x8 map; for the
        # 1x1 case this pooling looks incompatible — verify non-inception path.
        my_embedding = F.avg_pool2d(my_embedding, kernel_size=8)
        if tensor:
            return my_embedding
        else:
            return my_embedding.numpy()[0, :, 0, 0]
    def _get_model_and_layer(self, model_name, layer, data):
        """ Internal method for getting layer from model
        :param model_name: model name such as 'resnet-18'
        :param layer: layer as a string for resnet-18 or int for alexnet
        :param data: "full" -> 200 output classes, otherwise 80
        :returns: pytorch model, selected layer
        """
        if data == "full":
            out_size = 200
        else:
            out_size = 80
        if model_name == 'inception':
            model = models.inception_v3(pretrained=True)
            # Replace both the main and auxiliary classifier heads.
            num_ftrs = model.fc.in_features
            model.fc = nn.Linear(num_ftrs, out_size)
            num_ftrs = model.AuxLogits.fc.in_features
            model.AuxLogits.fc = nn.Linear(num_ftrs, out_size)
        elif model_name == 'resnet18':
            model = models.resnet18(pretrained=True)
            num_ftrs = model.fc.in_features
            model.fc = nn.Linear(num_ftrs, out_size)
        elif model_name == 'ourT10Class':
            # Loads a fully-pickled model object (not a state_dict).
            # model = torch.load('/dccstor/alfassy/saved_models/trained_discriminatorfeatureClassifierTrain2018.8.22.12:54epoch:128')
            model = torch.load('/dccstor/alfassy/saved_models/inception_trainincT10Half2018.9.4.14:40epoch:26')
        else:
            raise KeyError('Model %s was not found' % model_name)
        model.eval()
        if use_cuda:
            model.cuda()
        if model_name == 'inception' or model_name == 'resnet18':
            # Load checkpoint.
            assert os.path.isfile(self.model_path), 'Error: no checkpoint found!'
            checkpoint = torch.load(self.model_path)
            best_acc = checkpoint['best_acc']
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        if model_name == 'inception':
            if layer == 'default':
                layer = model._modules.get('Mixed_7c')
                self.layer_output_size = 2048
            else:
                raise Exception('wrong layer name')
            return model, layer
        elif model_name == 'resnet18':
            if layer == 'default':
                layer = model._modules.get('avgpool')
                self.layer_output_size = 512
            else:
                raise Exception('wrong layer name')
            return model, layer
        elif model_name == 'ourT10Class':
            layer = model._modules.get('linear_block')
            self.layer_output_size = 2048
            return model, layer
| 41.096296
| 133
| 0.634283
|
4a0883c875e9c19c6a60ce1562c7099d5779a09d
| 2,361
|
py
|
Python
|
bin/vault_utils/vault_interface.py
|
splunk/TA-VaultSync
|
4012c31adf12467dcb940d6dea43050c22670c11
|
[
"Apache-2.0"
] | 6
|
2020-10-22T15:49:01.000Z
|
2021-09-20T17:35:26.000Z
|
bin/vault_utils/vault_interface.py
|
splunk/TA-VaultSync
|
4012c31adf12467dcb940d6dea43050c22670c11
|
[
"Apache-2.0"
] | null | null | null |
bin/vault_utils/vault_interface.py
|
splunk/TA-VaultSync
|
4012c31adf12467dcb940d6dea43050c22670c11
|
[
"Apache-2.0"
] | 3
|
2020-10-21T18:50:58.000Z
|
2022-03-27T09:52:57.000Z
|
# Copyright 2020 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
from .engine import VaultEngine
class Vault(object):
    """Minimal client for the HashiCorp Vault HTTP API (v1).

    Authenticates with the AppRole auth method at construction time and
    caches the issued client token for subsequent requests.
    """
    def __init__(self, addr, approle_path, role_id, secret_id, namespace=None):
        """
        :param addr: base Vault address, e.g. ``https://vault.example.com``
        :param approle_path: mount path of the AppRole auth method
        :param role_id: AppRole ``role_id`` credential
        :param secret_id: AppRole ``secret_id`` credential
        :param namespace: optional Vault Enterprise namespace header value
        """
        self._addr = addr
        self._api_url = "{0}/v1".format(addr)
        self._approle_path = approle_path
        self._role_id = role_id
        self._secret_id = secret_id
        self._namespace = namespace
        self._token = self._authenticate_approle()

    # TODO - this should be abstracted into an authentication method class
    def _authenticate_approle(self):
        """Log in via AppRole and return the issued client token."""
        auth_data = json.dumps({
            "role_id": self._role_id,
            "secret_id": self._secret_id,
        })
        auth_url = self.url_for_path(
            "auth/{0}/login".format(self._approle_path)
        )
        headers = {}
        self._add_namespace(headers)
        r = requests.post(auth_url, data=auth_data, headers=headers)
        r.raise_for_status()
        # TODO - this needs error handling
        return r.json()["auth"]["client_token"]

    def _add_namespace(self, headers):
        # Only Vault Enterprise deployments use the namespace header.
        if self._namespace:
            headers["X-Vault-Namespace"] = self._namespace

    def _add_token(self, headers):
        headers["X-Vault-Token"] = self._token

    def url_for_path(self, path):
        """Return the full API URL for the API-relative *path*."""
        return "{0}/{1}".format(self._api_url, path)

    def engine(self, engine_type, engine_path):
        """Return a :class:`VaultEngine` wrapper for the mounted engine."""
        return VaultEngine.engine_at_path(self, engine_type, engine_path)

    def _get(self, path, params=None):
        """GET *path* with auth/namespace headers; return the ``data`` payload.

        ``params`` defaults to ``None`` instead of ``{}`` to avoid the
        shared-mutable-default-argument pitfall.
        """
        url = self.url_for_path(path)
        headers = {}
        self._add_namespace(headers)
        self._add_token(headers)
        response = requests.get(
            url, headers=headers, params=params if params is not None else {}
        )
        response.raise_for_status()
        return response.json()["data"]
| 33.253521
| 79
| 0.657349
|
4a08851fbba46526676fc5f1d4690e240f5f986c
| 34
|
py
|
Python
|
server/problem_sets/gen/gens/addition_integers/__init__.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
server/problem_sets/gen/gens/addition_integers/__init__.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | 5
|
2021-03-09T10:36:59.000Z
|
2022-02-26T14:36:08.000Z
|
server/problem_sets/gen/gens/addition_integers/__init__.py
|
vinhowe/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
from .addition_integers import *
| 11.333333
| 32
| 0.794118
|
4a0885530bbd33fd56f2ab77b92125d61a5d343d
| 541
|
py
|
Python
|
blog/models.py
|
SantiQ-94/djangoExperiment
|
5f30b3a1fbc7503563b3d317f666e8d265b72d20
|
[
"MIT"
] | null | null | null |
blog/models.py
|
SantiQ-94/djangoExperiment
|
5f30b3a1fbc7503563b3d317f666e8d265b72d20
|
[
"MIT"
] | null | null | null |
blog/models.py
|
SantiQ-94/djangoExperiment
|
5f30b3a1fbc7503563b3d317f666e8d265b72d20
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post written by a Django auth user."""
    # Deleting the user cascades to all of their posts.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Set automatically at creation time.
    created_date = models.DateTimeField(
        default=timezone.now
    )
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True
    )
    def publish(self):
        """Mark the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        # Admin/shell display uses the post title.
        return self.title
| 25.761905
| 69
| 0.658041
|
4a0885ce0730a6868b7b5898ee88fcd1b3665ed6
| 4,716
|
py
|
Python
|
ivy/functional/backends/tensorflow/data_type.py
|
juliagsy/ivy
|
e0bf8bd88704a8d985941a045c2e2de96d44deb9
|
[
"Apache-2.0"
] | 681
|
2022-01-18T19:08:56.000Z
|
2022-03-31T22:48:37.000Z
|
ivy/functional/backends/tensorflow/data_type.py
|
juliagsy/ivy
|
e0bf8bd88704a8d985941a045c2e2de96d44deb9
|
[
"Apache-2.0"
] | 637
|
2022-01-19T07:40:28.000Z
|
2022-03-31T19:06:47.000Z
|
ivy/functional/backends/tensorflow/data_type.py
|
juliagsy/ivy
|
e0bf8bd88704a8d985941a045c2e2de96d44deb9
|
[
"Apache-2.0"
] | 501
|
2022-01-23T14:48:35.000Z
|
2022-03-31T04:09:38.000Z
|
# global
import numpy as np
import tensorflow as tf
from typing import Union, Tuple, List
from tensorflow.python.types.core import Tensor
from tensorflow.python.framework.dtypes import DType
# local
import ivy
def can_cast(from_: Union[tf.DType, Tensor], to: tf.DType) -> bool:
    """Return True if values of dtype ``from_`` can be cast to ``to``.

    A tensor may be passed as ``from_``, in which case its dtype is used.
    Casting is rejected when it would narrow the bit width or cross an
    incompatible type family (bool/int/uint/float).
    """
    source = from_.dtype if isinstance(from_, Tensor) else from_
    src, dst = str(source), str(to)
    # Never allow a narrowing cast.
    if ivy.dtype_bits(to) < ivy.dtype_bits(source):
        return False
    # Cross-family casts that are always rejected. Note: "'int" (with the
    # leading quote from tf's repr) matches signed ints only, while "int"
    # alone also matches uints.
    forbidden = (
        ("'int" in src and "uint" in dst)
        or ("bool" in src and ("int" in dst or "float" in dst))
        or ("int" in src and ("float" in dst or "bool" in dst))
        or ("float" in src and "bool" in dst)
        or ("float" in src and "int" in dst)
    )
    if forbidden:
        return False
    # uint -> signed int needs strictly more bits to hold the full range.
    if "uint" in src and "'int" in dst:
        if ivy.dtype_bits(to) <= ivy.dtype_bits(source):
            return False
    return True
# Mapping from native TensorFlow dtypes to ivy dtype strings.
ivy_dtype_dict = {
    tf.int8: "int8",
    tf.int16: "int16",
    tf.int32: "int32",
    tf.int64: "int64",
    tf.uint8: "uint8",
    tf.uint16: "uint16",
    tf.uint32: "uint32",
    tf.uint64: "uint64",
    tf.bfloat16: "bfloat16",
    tf.float16: "float16",
    tf.float32: "float32",
    tf.float64: "float64",
    tf.bool: "bool",
}
# Inverse mapping: ivy dtype strings back to native TensorFlow dtypes.
native_dtype_dict = {
    "int8": tf.int8,
    "int16": tf.int16,
    "int32": tf.int32,
    "int64": tf.int64,
    "uint8": tf.uint8,
    "uint16": tf.uint16,
    "uint32": tf.uint32,
    "uint64": tf.uint64,
    "bfloat16": tf.bfloat16,
    "float16": tf.float16,
    "float32": tf.float32,
    "float64": tf.float64,
    "bool": tf.bool,
}
# noinspection PyShadowingBuiltins
def iinfo(type: Union[DType, str, Tensor]) -> np.iinfo:
    # Integer machine limits (min/max/bits) for the given dtype.
    return tf.experimental.numpy.iinfo(ivy.as_ivy_dtype(type))
class Finfo:
    """Float machine-limits wrapper exposing tf's finfo fields as plain floats."""
    def __init__(self, tf_finfo):
        self._tf_finfo = tf_finfo
    @property
    def bits(self):
        # Total bit width of the floating-point type.
        return self._tf_finfo.bits
    @property
    def eps(self):
        # Smallest representable increment at 1.0.
        return float(self._tf_finfo.eps)
    @property
    def max(self):
        return float(self._tf_finfo.max)
    @property
    def min(self):
        return float(self._tf_finfo.min)
    @property
    def smallest_normal(self):
        # tf names this field ``tiny``.
        return float(self._tf_finfo.tiny)
# noinspection PyShadowingBuiltins
def finfo(type: Union[DType, str, Tensor]) -> Finfo:
    # Floating-point machine limits for the given dtype, wrapped in Finfo.
    return Finfo(tf.experimental.numpy.finfo(ivy.as_native_dtype(type)))
def result_type(*arrays_and_dtypes: Union[Tensor, tf.DType]) -> tf.DType:
    """Return the dtype resulting from type promotion over the arguments."""
    promote = tf.experimental.numpy.result_type
    if len(arrays_and_dtypes) <= 1:
        # NOTE(review): the whole tuple is forwarded here, and tf treats it as
        # array-like; behavior intentionally kept identical.
        return promote(arrays_and_dtypes)
    # Fold pairwise over the remaining operands.
    promoted = promote(arrays_and_dtypes[0], arrays_and_dtypes[1])
    for operand in arrays_and_dtypes[2:]:
        promoted = promote(promoted, operand)
    return promoted
def broadcast_to(x: Tensor, shape: Tuple[int, ...]) -> Tensor:
    """Broadcast ``x`` to the target ``shape`` (thin wrapper over tf.broadcast_to)."""
    return tf.broadcast_to(x, shape)
def broadcast_arrays(*arrays: Tensor) -> List[Tensor]:
    """Broadcast the given tensors against one another.

    :param arrays: tensors to broadcast to a common shape.
    :returns: list of tensors, each broadcast to the common shape. A single
        tensor is returned unchanged; an empty call returns an empty list
        (previously this raised IndexError).
    """
    if not arrays:
        return []
    if len(arrays) == 1:
        return [arrays[0]]
    # Fold the dynamic broadcast shape across all inputs.
    desired_shape = tf.broadcast_dynamic_shape(arrays[0].shape, arrays[1].shape)
    for tensor in arrays[2:]:
        desired_shape = tf.broadcast_dynamic_shape(desired_shape, tensor.shape)
    return [tf.broadcast_to(tensor, desired_shape) for tensor in arrays]
def astype(x: Tensor, dtype: tf.DType, copy: bool = True) -> Tensor:
    """Cast ``x`` to ``dtype``.

    When ``copy`` is True the result never aliases ``x``; when False and the
    dtype already matches, ``x`` itself is returned.
    """
    dtype = ivy.as_native_dtype(dtype)
    if x.dtype == dtype:
        # No cast needed: duplicate only when a copy was requested.
        return tf.experimental.numpy.copy(x) if copy else x
    if copy:
        # tf.cast already produces a new tensor, so no explicit copy is made.
        return tf.cast(x, dtype)
    duplicate = tf.experimental.numpy.copy(x)
    return tf.cast(duplicate, dtype)
def dtype_bits(dtype_in):
    """Return the bit width of ``dtype_in`` (1 for bool)."""
    dtype_str = as_ivy_dtype(dtype_in)
    if "bool" in dtype_str:
        return 1
    # Strip every known prefix, leaving only the numeric width.
    # Order matters: "uint" must be removed before "int".
    digits = dtype_str
    for prefix in ("tf.", "uint", "int", "bfloat", "float"):
        digits = digits.replace(prefix, "")
    return int(digits)
def dtype(x, as_native=False):
    """Return the dtype of array ``x`` — native tf dtype if ``as_native``, else an ivy dtype string."""
    if as_native:
        return ivy.to_native(x).dtype
    return as_ivy_dtype(x.dtype)
def as_ivy_dtype(dtype_in):
    """Convert a native tf dtype (or dtype string) to an ivy Dtype."""
    if dtype_in is a str:  # noqa: this comment clarifies intent; see code below
    # Strings pass through directly; native dtypes go via the lookup table.
    if isinstance(dtype_in, str):
        return ivy.Dtype(dtype_in)
    return ivy.Dtype(ivy_dtype_dict[dtype_in])
def as_native_dtype(dtype_in):
    """Convert an ivy dtype string to the native tf dtype (non-strings pass through)."""
    if not isinstance(dtype_in, str):
        return dtype_in
    return native_dtype_dict[ivy.Dtype(dtype_in)]
| 26.055249
| 84
| 0.627439
|
4a0885e97e2f6797fddebfca599e6516ea07f160
| 36,898
|
py
|
Python
|
bertviz/pytorch_transformers_attn/modeling_gpt2.py
|
PradyumnaGupta/bertviz
|
7460c3042b4eb9b1c1005fc1aae8709e0edf7f49
|
[
"Apache-2.0"
] | 2
|
2019-09-03T20:26:51.000Z
|
2020-06-25T14:27:24.000Z
|
bertviz/pytorch_transformers_attn/modeling_gpt2.py
|
PradyumnaGupta/bertviz
|
7460c3042b4eb9b1c1005fc1aae8709e0edf7f49
|
[
"Apache-2.0"
] | null | null | null |
bertviz/pytorch_transformers_attn/modeling_gpt2.py
|
PradyumnaGupta/bertviz
|
7460c3042b4eb9b1c1005fc1aae8709e0edf7f49
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Change log
# 7/14/19 Jesse Vig Adapted for use in visualization
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import (Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig,
PreTrainedModel, prune_conv1d_layer, SequenceSummary,
add_start_docstrings)
from .modeling_bert import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"}
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load weights from a TensorFlow GPT-2 checkpoint into *model* in place.

    Walks each TF variable name (e.g. ``model/h0/attn/c_attn/w``) segment by
    segment to locate the corresponding PyTorch submodule/parameter, then
    copies the value. Returns the (mutated) model.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            # Segments like "h0" bundle a module name and an index.
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming: 'w'/'g' -> weight, 'b' -> bias; embeddings keep
            # their own names but resolve to their .weight parameter.
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Index into the layer list (e.g. transformer block number).
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation, as in GPT-2)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
class GPT2Config(PretrainedConfig):
    """Configuration class to store the configuration of a `GPT2Model`.
    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
        n_positions: Number of positional embeddings.
        n_ctx: Size of the causal mask (usually same as n_positions).
        n_embd: Dimensionality of the embeddings and hidden states.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        layer_norm_epsilon: epsilon to use in the layer norm layers
        resid_pdrop: The dropout probabilitiy for all fully connected
            layers in the embeddings, encoder, and pooler.
        attn_pdrop: The dropout ratio for the attention
            probabilities.
        embd_pdrop: The dropout ratio for the embeddings.
        initializer_range: The sttdev of the truncated_normal_initializer for
            initializing all weight matrices.
    """
    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        num_labels=1,
        summary_type='token_ids',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Constructs GPT2Config.

        Accepts either a path to a JSON config file (all keys are loaded
        verbatim into the instance) or an int vocabulary size plus the
        keyword parameters documented on the class.
        """
        super(GPT2Config, self).__init__(**kwargs)
        # Python 2 compatibility: ``unicode`` only exists there.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_first_dropout = summary_first_dropout
            self.summary_proj_to_labels = summary_proj_to_labels
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )
    # The four properties below map the generic PretrainedConfig attribute
    # names onto GPT-2's short names.
    @property
    def max_position_embeddings(self):
        return self.n_positions
    @property
    def hidden_size(self):
        return self.n_embd
    @property
    def num_attention_heads(self):
        return self.n_head
    @property
    def num_hidden_layers(self):
        return self.n_layer
class Attention(nn.Module):
    """Multi-head causal self-attention for GPT-2.

    Also records queries/keys/attention probabilities per layer for the
    bertviz visualization (the ``attn_data`` dict returned from forward).
    """
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, shaped for broadcasting over (batch, head).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        # Single projection producing query, key and value (split in forward).
        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
    def prune_heads(self, heads):
        """Remove the given attention heads by slicing c_attn/c_proj weights."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # c_attn packs q, k, v consecutively, so the same index is offset
        # into each of the three sections.
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
    def _attn(self, q, k, v, head_mask=None):
        # Scaled dot-product attention with the causal mask applied
        # additively (-1e4 on masked positions) before the softmax.
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns-nd:ns, :ns]
        w = w * b - 1e4 * (1 - b)
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs
    def merge_heads(self, x):
        # Inverse of split_heads: fold the head dimension back into features.
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    def forward(self, x, layer_past=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Prepend cached keys/values for incremental decoding.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        if self.output_attentions:
            attention_probs = attn_outputs[1]
            # Visualization payload added for bertviz (keys transposed back
            # to (batch, head, seq, head_features)).
            attn_data = {
                'attn': attention_probs,
                'queries': query,
                'keys': key.transpose(-1, -2)
            }
            outputs = [a, present, attn_data]
        else:
            outputs = [a, present]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward block: Conv1D expand -> gelu -> Conv1D project -> dropout."""
    def __init__(self, n_state, config):  # n_state is the hidden width (4 * n_embd in GPT-2)
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)
    def forward(self, x):
        # expand -> activate -> project back -> dropout, as one pipeline
        return self.dropout(self.c_proj(self.act(self.c_fc(x))))
class Block(nn.Module):
    """One GPT-2 transformer block: pre-LayerNorm attention and MLP, each wrapped in a residual add."""
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        # Feed-forward hidden width is 4x the embedding size.
        self.mlp = MLP(4 * nx, config)
    def forward(self, x, layer_past=None, head_mask=None):
        output_attn = self.attn(self.ln_1(x), layer_past=layer_past, head_mask=head_mask)
        a = output_attn[0]  # output_attn: a, present, (attentions)
        # Residual connection around attention.
        x = x + a
        m = self.mlp(self.ln_2(x))
        # Residual connection around the feed-forward MLP.
        x = x + m
        outputs = [x] + output_attn[1:]
        return outputs  # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)
    def init_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding/Conv1D weights are drawn from a normal distribution
        with std ``config.initializer_range``; LayerNorm is reset to the
        identity (weight=1, bias=0); all biases are zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.GPT2Config`): Model configuration class with all the parameters of the model.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`pytorch_transformers.BPT2Tokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1[``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
**attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = GPT2Config.from_pretrained('gpt2')
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2Model(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(GPT2Model, self).__init__(config)
self.output_hidden_states = config.output_hidden_states
config.output_attentions = True
self.output_attentions = config.output_attentions
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
return self.wte
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None):
        """Run the transformer stack over ``input_ids``.

        Args:
            input_ids: token indices; flattened to (batch, seq) below.
            position_ids: optional position indices; defaults to a range
                offset by the cached sequence length in ``past``.
            token_type_ids: optional segment ids, embedded via ``wte``.
            past: optional list of per-layer cached key/value states for
                incremental decoding.
            head_mask: optional mask with 1.0 for heads to keep.

        Returns:
            tuple: (last_hidden_state, presents[, all_hidden_states][, attentions]).
        """
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # length of the cached sequence; new positions start after it
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                # single mask shared by every layer and batch element
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer
        input_shape = input_ids.size()
        # flatten any leading dims to (batch, seq) for the embedding lookups
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            # token-type ids share the token embedding matrix
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                # record the input to each block, restored to the caller's shape
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = block(hidden_states, layer_past, head_mask[i])
            hidden_states, present = outputs[:2]
            # cache this layer's key/value so the next call can pass it as `past`
            presents = presents + (present,)
            if self.output_attentions:
                all_attentions.append(outputs[2])
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states, presents)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # # let the number of heads free (-1) so we can extract attention even after head pruning
            # attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            # all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, presents, (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-1`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = GPT2Config.from_pretrained('gpt2')
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2LMHeadModel(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=input_ids)
>>> loss, logits = outputs[:2]
"""
def __init__(self, config):
super(GPT2LMHeadModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.transformer.wte)
def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, past=None, head_mask=None):
transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
past=past, head_mask=head_mask)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the intput sequence).
""", GPT2_START_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices can be obtained using :class:`pytorch_transformers.BPT2Tokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**mc_token_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1[``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
**attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-1`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
**multiple_choice_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
`multiple_choice_labels`: optional multiple choice labels: ``torch.LongTensor`` of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Multiple choice classification loss.
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = GPT2Config.from_pretrained('gpt2')
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2DoubleHeadsModel(config)
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] # Assume you've added [CLS] to the vocabulary
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> mc_token_ids = torch.tensor([-1, -1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, mc_token_ids)
>>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(GPT2DoubleHeadsModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.apply(self.init_weights)
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.transformer.wte)
def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
position_ids=None, past=None, head_mask=None):
transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
past=past, head_mask=head_mask)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
mc_labels.view(-1))
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
| 49.197333
| 136
| 0.643639
|
4a08861dbc949a66e57066cf6fb7190274746f5d
| 28,472
|
py
|
Python
|
python/ccxt/async_support/gateio.py
|
rekurt/ccxt
|
0769d256b359e10d9cabf4886de68ba1cb393220
|
[
"MIT"
] | 1
|
2019-10-15T05:53:59.000Z
|
2019-10-15T05:53:59.000Z
|
python/ccxt/async_support/gateio.py
|
rekurt/ccxt
|
0769d256b359e10d9cabf4886de68ba1cb393220
|
[
"MIT"
] | 4
|
2020-09-07T14:47:15.000Z
|
2021-05-10T17:47:05.000Z
|
python/ccxt/async_support/gateio.py
|
rekurt/ccxt
|
0769d256b359e10d9cabf4886de68ba1cb393220
|
[
"MIT"
] | 2
|
2019-12-02T10:32:52.000Z
|
2020-03-22T00:58:58.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    # Python 2: ``basestring`` exists as the common base of str/unicode.
    basestring  # Python 2
except NameError:
    # Python 3: ``basestring`` is gone; plain ``str`` covers all text.
    basestring = str  # Python 3
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
class gateio (Exchange):
    def describe(self):
        """Return the static exchange description merged over the base class:
        identity, advertised capabilities, endpoints, fee schedule, error-code
        mappings and default options.
        """
        return self.deep_extend(super(gateio, self).describe(), {
            'id': 'gateio',
            'name': 'Gate.io',
            'countries': ['CN'],
            'version': '2',
            'rateLimit': 1000,
            # unified-API capabilities implemented by this class
            'has': {
                'CORS': False,
                'createMarketOrder': False,
                'fetchTickers': True,
                'withdraw': True,
                'fetchDeposits': True,
                'fetchWithdrawals': True,
                'fetchTransactions': True,
                'createDepositAddress': True,
                'fetchDepositAddress': True,
                'fetchClosedOrders': False,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchOrder': True,
                'fetchMyTrades': True,
            },
            # candle timeframes, expressed in seconds as the API expects
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '10m': '600',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
                'api': {
                    'public': 'https://data.gate.io/api',
                    'private': 'https://data.gate.io/api',
                },
                'www': 'https://gate.io/',
                'doc': 'https://gate.io/api2',
                'fees': [
                    'https://gate.io/fee',
                    'https://support.gate.io/hc/en-us/articles/115003577673',
                ],
                'referral': 'https://www.gate.io/signup/2436035',
            },
            # REST endpoints grouped by access level and HTTP verb
            'api': {
                'public': {
                    'get': [
                        'candlestick2/{id}',
                        'pairs',
                        'marketinfo',
                        'marketlist',
                        'tickers',
                        'ticker/{id}',
                        'orderBook/{id}',
                        'trade/{id}',
                        'tradeHistory/{id}',
                        'tradeHistory/{id}/{tid}',
                    ],
                },
                'private': {
                    'post': [
                        'balances',
                        'depositAddress',
                        'newAddress',
                        'depositsWithdrawals',
                        'buy',
                        'sell',
                        'cancelOrder',
                        'cancelAllOrders',
                        'getOrder',
                        'openOrders',
                        'tradeHistory',
                        'withdraw',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': 0.002,
                    'taker': 0.002,
                },
            },
            # numeric error codes mapped to unified exception classes
            # (handled in handle_errors)
            'exceptions': {
                '4': DDoSProtection,
                '7': NotSupported,
                '8': NotSupported,
                '9': NotSupported,
                '15': DDoSProtection,
                '16': OrderNotFound,
                '17': OrderNotFound,
                '21': InsufficientFunds,
            },
            # https://gate.io/api2#errCode
            'errorCodeNames': {
                '1': 'Invalid request',
                '2': 'Invalid version',
                '3': 'Invalid request',
                '4': 'Too many attempts',
                '5': 'Invalid sign',
                '6': 'Invalid sign',
                '7': 'Currency is not supported',
                '8': 'Currency is not supported',
                '9': 'Currency is not supported',
                '10': 'Verified failed',
                '11': 'Obtaining address failed',
                '12': 'Empty params',
                '13': 'Internal error, please report to administrator',
                '14': 'Invalid user',
                '15': 'Cancel order too fast, please wait 1 min and try again',
                '16': 'Invalid order id or order is already closed',
                '17': 'Invalid orderid',
                '18': 'Invalid amount',
                '19': 'Not permitted or trade is disabled',
                '20': 'Your order size is too small',
                '21': 'You don\'t have enough fund',
            },
            'options': {
                'fetchTradesMethod': 'public_get_tradehistory_id',  # 'public_get_tradehistory_id_tid'
                # fallback minimum order cost per quote currency, used when
                # the marketinfo response gives no usable minimum
                'limits': {
                    'cost': {
                        'min': {
                            'BTC': 0.0001,
                            'ETH': 0.001,
                            'USDT': 1,
                        },
                    },
                },
            },
        })
async def fetch_markets(self, params={}):
response = await self.publicGetMarketinfo(params)
markets = self.safe_value(response, 'pairs')
if not markets:
raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
# all of their symbols are separated with an underscore
# but not boe_eth_eth(BOE_ETH/ETH) which has two underscores
# https://github.com/ccxt/ccxt/issues/4894
parts = id.split('_')
numParts = len(parts)
baseId = parts[0]
quoteId = parts[1]
if numParts > 2:
baseId = parts[0] + '_' + parts[1]
quoteId = parts[2]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': math.pow(10, -details['decimal_places']),
'max': None,
}
defaultCost = amountLimits['min'] * priceLimits['min']
minCost = self.safe_float(self.options['limits']['cost']['min'], quote, defaultCost)
costLimits = {
'min': minCost,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
'cost': costLimits,
}
active = True
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostBalances(params)
result = {'info': response}
available = self.safe_value(response, 'available', {})
if isinstance(available, list):
available = {}
locked = self.safe_value(response, 'locked', {})
currencyIds = list(available.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(available, currencyId)
account['used'] = self.safe_float(locked, currencyId)
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = await self.publicGetOrderBookId(self.extend(request, params))
return self.parse_order_book(response)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
# they return [Timestamp, Volume, Close, High, Low, Open]
return [
int(ohlcv[0]), # t
float(ohlcv[5]), # o
float(ohlcv[3]), # h
float(ohlcv[4]), # l
float(ohlcv[2]), # c
float(ohlcv[1]), # v
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
'group_sec': self.timeframes[timeframe],
}
# max limit = 1001
if limit is not None:
periodDurationInSeconds = self.parse_timeframe(timeframe)
hours = int((periodDurationInSeconds * limit) / 3600)
request['range_hour'] = max(0, hours - 1)
response = await self.publicGetCandlestick2Id(self.extend(request, params))
#
# {
# "elapsed": "15ms",
# "result": "true",
# "data": [
# ["1553930820000", "1.005299", "4081.05", "4086.18", "4081.05", "4086.18"],
# ["1553930880000", "0.110923277", "4095.2", "4095.23", "4091.15", "4091.15"],
# ...
# ["1553934420000", "0", "4089.42", "4089.42", "4089.42", "4089.42"],
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
percentage = self.safe_float(ticker, 'percentChange')
open = None
change = None
average = None
if (last is not None) and (percentage is not None):
relativeChange = percentage / 100
open = last / self.sum(1, relativeChange)
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high24hr'),
'low': self.safe_float(ticker, 'low24hr'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': self.safe_float(ticker, 'quoteVolume'), # gateio has them reversed
'quoteVolume': self.safe_float(ticker, 'baseVolume'),
'info': ticker,
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
resultString = self.safe_string(response, 'result', '')
if resultString != 'false':
return
errorCode = self.safe_string(response, 'code')
if errorCode is not None:
exceptions = self.exceptions
errorCodeNames = self.errorCodeNames
if errorCode in exceptions:
message = ''
if errorCode in errorCodeNames:
message = errorCodeNames[errorCode]
else:
message = self.safe_string(response, 'message', '(unknown)')
raise exceptions[errorCode](message)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(response[id], market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp_2(trade, 'timestamp', 'time_unix')
id = self.safe_string_2(trade, 'tradeID', 'id')
# take either of orderid or orderId
orderId = self.safe_string_2(trade, 'orderid', 'orderNumber')
price = self.safe_float(trade, 'rate')
amount = self.safe_float(trade, 'amount')
type = self.safe_string(trade, 'type')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': type,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
method = self.safe_string(self.options, 'fetchTradesMethod', 'public_get_tradehistory_id')
response = await getattr(self, method)(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
response = await self.privatePostOpenOrders(params)
return self.parse_orders(response['orders'], None, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderNumber': id,
'currencyPair': self.market_id(symbol),
}
response = await self.privatePostGetOrder(self.extend(request, params))
return self.parse_order(response['order'])
def parse_order_status(self, status):
statuses = {
'cancelled': 'canceled',
# 'closed': 'closed', # these two statuses aren't actually needed
# 'open': 'open', # as they are mapped one-to-one
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {'amount': '0.00000000',
# 'currencyPair': 'xlm_usdt',
# 'fee': '0.0113766632239302 USDT',
# 'feeCurrency': 'USDT',
# 'feePercentage': 0.18,
# 'feeValue': '0.0113766632239302',
# 'filledAmount': '30.14004987',
# 'filledRate': 0.2097,
# 'initialAmount': '30.14004987',
# 'initialRate': '0.2097',
# 'left': 0,
# 'orderNumber': '998307286',
# 'rate': '0.2097',
# 'status': 'closed',
# 'timestamp': 1531158583,
# 'type': 'sell'},
#
id = self.safe_string(order, 'orderNumber')
symbol = None
marketId = self.safe_string(order, 'currencyPair')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_timestamp(order, 'timestamp')
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'type')
price = self.safe_float(order, 'filledRate')
amount = self.safe_float(order, 'initialAmount')
filled = self.safe_float(order, 'filledAmount')
# In the order status response, self field has a different name.
remaining = self.safe_float_2(order, 'leftAmount', 'left')
feeCost = self.safe_float(order, 'feeValue')
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
feeRate = self.safe_float(order, 'feePercentage')
if feeRate is not None:
feeRate = feeRate / 100
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': {
'cost': feeCost,
'currency': feeCurrencyCode,
'rate': feeRate,
},
'info': order,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
market = self.market(symbol)
request = {
'currencyPair': market['id'],
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(self.extend({
'status': 'open',
'type': side,
'initialAmount': amount,
}, response), market)
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires symbol argument')
await self.load_markets()
request = {
'orderNumber': id,
'currencyPair': self.market_id(symbol),
}
return await self.privatePostCancelOrder(self.extend(request, params))
    async def query_deposit_address(self, method, code, params={}):
        """Shared backend for create_deposit_address / fetch_deposit_address.

        Args:
            method: 'New' or 'Deposit' -- selects privatePostNewAddress or
                privatePostDepositAddress.
            code: unified currency code.

        Raises:
            InvalidAddress: when the response contains a placeholder string
                instead of a real address.
        """
        await self.load_markets()
        currency = self.currency(code)
        method = 'privatePost' + method + 'Address'
        request = {
            'currency': currency['id'],
        }
        response = await getattr(self, method)(self.extend(request, params))
        address = self.safe_string(response, 'addr')
        tag = None
        # a literal 'address' substring marks a not-yet-generated placeholder
        if (address is not None) and (address.find('address') >= 0):
            raise InvalidAddress(self.id + ' queryDepositAddress ' + address)
        if code == 'XRP':
            # XRP responses pack 'address tag' into one space-separated string
            # NOTE(review): parts[1] raises IndexError if no space is present
            # -- confirm the API always includes the tag for XRP
            parts = address.split(' ')
            address = parts[0]
            tag = parts[1]
        return {
            'currency': currency,
            'address': address,
            'tag': tag,
            'info': response,
        }
async def create_deposit_address(self, code, params={}):
return await self.query_deposit_address('New', code, params)
async def fetch_deposit_address(self, code, params={}):
return await self.query_deposit_address('Deposit', code, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privatePostOpenOrders(params)
return self.parse_orders(response['orders'], market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
'orderNumber': id,
}
response = await self.privatePostTradeHistory(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires symbol param')
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
response = await self.privatePostTradeHistory(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
'address': address, # Address must exist in you AddressBook in security settings
}
if tag is not None:
request['address'] += ' ' + tag
response = await self.privatePostWithdraw(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def fetch_transactions_by_type(self, type=None, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['start'] = since
response = await self.privatePostDepositsWithdrawals(self.extend(request, params))
transactions = None
if type is None:
deposits = self.safe_value(response, 'deposits', [])
withdrawals = self.safe_value(response, 'withdraws', [])
transactions = self.array_concat(deposits, withdrawals)
else:
transactions = self.safe_value(response, type, [])
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(transactions, currency, since, limit)
    async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        # All deposits and withdrawals merged (type=None).
        return await self.fetch_transactions_by_type(None, code, since, limit, params)
    async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        # Deposits only — selects the 'deposits' key of the API response.
        return await self.fetch_transactions_by_type('deposits', code, since, limit, params)
    async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        # Withdrawals only — the API spells the key 'withdraws'.
        return await self.fetch_transactions_by_type('withdraws', code, since, limit, params)
def parse_transaction(self, transaction, currency=None):
#
# deposit
#
# {
# 'id': 'd16520849',
# 'currency': 'NEO',
# 'address': False,
# 'amount': '1',
# 'txid': '01acf6b8ce4d24a....',
# 'timestamp': '1553125968',
# 'status': 'DONE',
# 'type': 'deposit'
# }
#
# withdrawal
#
# {
# 'id': 'w5864259',
# 'currency': 'ETH',
# 'address': '0x72632f462....',
# 'amount': '0.4947',
# 'txid': '0x111167d120f736....',
# 'timestamp': '1553123688',
# 'status': 'DONE',
# 'type': 'withdrawal'
# }
#
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'txid')
amount = self.safe_float(transaction, 'amount')
address = self.safe_string(transaction, 'address')
timestamp = self.safe_timestamp(transaction, 'timestamp')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.parse_transaction_type(id[0])
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Send the request and raise ExchangeError on API-level failure.

        The API signals errors inside a 'result' field: None, a string other
        than 'true', or any falsy value all count as failure.
        """
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'result' in response:
            result = response['result']
            message = self.id + ' ' + self.json(response)
            if result is None:
                raise ExchangeError(message)
            # NOTE(review): `basestring` is a Python 2 builtin; ccxt normally
            # provides a py2/3 shim for it at module level — confirm the
            # import block of this file defines it, otherwise this raises
            # NameError on Python 3 whenever 'result' is present.
            if isinstance(result, basestring):
                if result != 'true':
                    raise ExchangeError(message)
            elif not result:
                raise ExchangeError(message)
        return response
| 38.52774
| 126
| 0.509553
|
4a0886c11221be1e9033dfa9de319996d501b5c1
| 2,154
|
py
|
Python
|
contentcuration/contentcuration/viewsets/sync/utils.py
|
kollivier/studio
|
9089780858ae9870421056b4e6e5659ae854db57
|
[
"MIT"
] | null | null | null |
contentcuration/contentcuration/viewsets/sync/utils.py
|
kollivier/studio
|
9089780858ae9870421056b4e6e5659ae854db57
|
[
"MIT"
] | null | null | null |
contentcuration/contentcuration/viewsets/sync/utils.py
|
kollivier/studio
|
9089780858ae9870421056b4e6e5659ae854db57
|
[
"MIT"
] | null | null | null |
from django.core.cache import cache
from contentcuration.viewsets.sync.constants import ALL_TABLES
from contentcuration.viewsets.sync.constants import COPIED
from contentcuration.viewsets.sync.constants import CREATED
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import MOVED
from contentcuration.viewsets.sync.constants import UPDATED
from contentcuration.viewsets.sync.constants import USER_CHANGES_PREFIX
def validate_table(table):
    """Raise ValueError when ``table`` is not one of the recognised sync tables."""
    if table in ALL_TABLES:
        return
    raise ValueError("{} is not a valid table name".format(table))
def generate_create_event(key, table, obj):
    """Build a CREATED change event for ``obj`` identified by ``key`` in ``table``."""
    validate_table(table)
    event = dict(obj=obj, key=key, table=table, type=CREATED)
    return event
def generate_update_event(key, table, mods):
    """Build an UPDATED change event carrying the field modifications ``mods``."""
    validate_table(table)
    event = dict(mods=mods, key=key, table=table, type=UPDATED)
    return event
def generate_delete_event(key, table):
    """Build a DELETED change event for the row identified by ``key``."""
    validate_table(table)
    event = dict(key=key, table=table, type=DELETED)
    return event
def generate_move_event(key, table, target, position):
    """Build a MOVED change event placing ``key`` at ``position`` relative to ``target``."""
    validate_table(table)
    event = dict(key=key, target=target, position=position, table=table, type=MOVED)
    return event
def generate_copy_event(
    key, table, from_key, target, position=None, mods=None, excluded_descendants=None
):
    """Build a COPIED change event.

    ``key`` is the id of the new copy, ``from_key`` the source row,
    ``target``/``position`` where to place it, ``mods`` field overrides and
    ``excluded_descendants`` subtrees to skip.
    """
    validate_table(table)
    event = dict(
        key=key,
        from_key=from_key,
        target=target,
        position=position,
        mods=mods,
        excluded_descendants=excluded_descendants,
        table=table,
        type=COPIED,
    )
    return event
def add_event_for_user(user_id, event):
    # Append ``event`` to the per-user change queue held in the Django cache.
    cache_key = USER_CHANGES_PREFIX.format(user_id=user_id)
    user_events = cache.get(cache_key) or []
    user_events.append(event)
    # NOTE(review): this read-modify-write is not atomic — two concurrent
    # writers can each read the same list and one append gets lost. Confirm
    # whether a lock or an atomic cache primitive is needed here.
    cache.set(cache_key, user_events, None)  # timeout=None: never expire
def get_and_clear_user_events(user_id):
    # Drain the per-user change queue: return all queued events and clear it.
    cache_key = USER_CHANGES_PREFIX.format(user_id=user_id)
    user_events = cache.get(cache_key) or []
    # NOTE(review): events appended between this get() and the delete() below
    # are silently discarded — confirm this window is acceptable.
    cache.delete(cache_key)
    return user_events
| 25.341176
| 85
| 0.670381
|
4a0886d19b675ee9a0f8e8a4e1e7a5f60047fcbd
| 4,391
|
py
|
Python
|
app.py
|
kimdiep/happiness-journal-app
|
4549721cc71813c4123e3c096e0388455cce9262
|
[
"MIT"
] | null | null | null |
app.py
|
kimdiep/happiness-journal-app
|
4549721cc71813c4123e3c096e0388455cce9262
|
[
"MIT"
] | null | null | null |
app.py
|
kimdiep/happiness-journal-app
|
4549721cc71813c4123e3c096e0388455cce9262
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('./models')
# flask setup
from flask import Flask, request, jsonify, render_template, redirect, url_for, session, flash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine, and_, text
from sqlalchemy.orm import sessionmaker
from flask_modus import Modus
from flask_bcrypt import Bcrypt
from flask_migrate import Migrate
from functools import wraps
app = Flask(__name__)
# bcrypt for password hashing
bcrypt = Bcrypt(app)
# NOTE(review): the 'postgres://' scheme was removed in SQLAlchemy 1.4 —
# newer versions require 'postgresql://'. Confirm the pinned SQLAlchemy
# version before upgrading.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://localhost/happiness-journal'
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
import os
# NOTE(review): a fresh random SECRET_KEY on every process start invalidates
# all existing sessions on restart and breaks multi-worker deployments —
# consider loading it from the environment instead.
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
# import models from happiness_journal.py for Idea
from happiness_journal import *
# generate session
def create_session(config):
    """Build a standalone SQLAlchemy session bound to the configured database.

    Independent of Flask-SQLAlchemy's request-scoped session.
    """
    engine = create_engine(config['SQLALCHEMY_DATABASE_URI'])
    session_factory = sessionmaker(bind=engine)
    db_session = session_factory()
    # Flask-SQLAlchemy's signalling support expects this attribute to exist.
    db_session._model_changes = {}
    return db_session
# Module-level session created at import time, separate from db.session.
manual_session = create_session(app.config)
# import form for user signup and login
# import user model
from forms import UserForm
from user_model import User
from sqlalchemy.exc import IntegrityError
# login decorator
# login decorator
def ensure_logged_in(fn):
    """View decorator: redirect anonymous visitors to the login page."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if session.get('user_id'):
            return fn(*args, **kwargs)
        flash("Whoopsie! You must be logged in first to view this page!")
        return redirect(url_for('login'))
    return wrapper
# app-controllers
# homepage
@app.route('/')
@ensure_logged_in
def index():
    # Static landing page for logged-in users.
    text = "Hello, Happiness Journal!"
    return render_template('index.html', message = text)
# ideas page
@app.route('/ideas', methods=["GET"])
@ensure_logged_in
def ideas():
    # List only the current user's ideas.
    text = "My Happiness Journal Ideas!"
    ideas = Idea.query.filter_by(user_id = session['user_id'])
    return render_template('ideas/homepage.html', message = text, ideas = ideas)
# route to add new idea
@app.route('/ideas/new', methods=['POST', 'GET'])
@ensure_logged_in
def new():
    """Show the new-idea form; on POST persist the idea and return to the list."""
    if request.method != 'POST':
        return render_template('ideas/new.html')
    fresh_idea = Idea(request.form['idea_note'], complete=False, user_id = session['user_id'])
    db.session.add(fresh_idea)
    db.session.commit()
    return redirect(url_for('ideas'))
# route to edit existing idea
@app.route('/ideas/edit/<int:id>', methods=["GET", "POST"])
@ensure_logged_in
def edit(id):
    """Edit one of the current user's ideas.

    Scopes the lookup to the logged-in user so one user cannot edit another
    user's ideas by guessing ids (the original fetched by id alone), and
    guards against a missing id (the original would raise AttributeError on
    a None result).
    """
    idea = Idea.query.filter_by(id=id, user_id=session['user_id']).first()
    if idea is None:
        flash("Whoopsie! That idea could not be found!")
        return redirect(url_for('ideas'))
    if request.method == 'POST':
        idea.note = request.form['idea_note']
        db.session.commit()
        return redirect(url_for('ideas'))
    return render_template('ideas/edit.html', idea = idea)
# route to delete existing idea
@app.route('/ideas/delete/<int:id>', methods=['GET'])
@ensure_logged_in
def delete(id):
    """Delete one of the current user's ideas.

    Scopes the lookup to the logged-in user (the original deleted any idea
    by id, letting users delete each other's entries) and skips silently
    when the id does not exist (the original crashed on delete(None)).
    """
    idea = Idea.query.filter_by(id=id, user_id=session['user_id']).first()
    if idea is not None:
        db.session.delete(idea)
        db.session.commit()
    return redirect(url_for('ideas'))
# auth-route for new user to sign up
@app.route('/signup', methods =["GET", "POST"])
def signup():
    """Create a new account; duplicate usernames re-render the form.

    Adds the db.session.rollback() the original lacked: after an
    IntegrityError the SQLAlchemy session is in a failed state and every
    later request using it would error until rolled back.
    """
    form = UserForm(request.form)
    if request.method == "POST" and form.validate():
        try:
            new_user = User(form.data['username'], form.data['password'])
            db.session.add(new_user)
            db.session.commit()
        except IntegrityError as e:
            db.session.rollback()
            flash("Oopsy! Please try again!")
            return render_template('users/signup.html', form=form)
        return redirect(url_for('login'))
    return render_template('users/signup.html', form=form)
# auth-route for existing user to log in
@app.route('/login', methods = ["GET", "POST"])
def login():
    """Log an existing user in; failed attempts flash an error.

    The original flashed the failure message unconditionally, so a plain GET
    of the login page already showed "unrecognised" — the flash now happens
    only after a failed POST.
    """
    form = UserForm(request.form)
    if request.method == "POST" and form.validate():
        found_user = User.query.filter_by(username = form.data['username']).first()
        if found_user:
            authenticated_user = bcrypt.check_password_hash(found_user.password, form.data['password'])
            if authenticated_user:
                flash("Woohoo! You are inside the Happiness Journal!")
                session['user_id'] = found_user.id
                return redirect(url_for('index'))
        flash("Oopsy! Your username and password is unrecognised. Please try again!")
    return render_template('users/login.html', form=form)
# logout
@app.route('/logout')
def logout():
    """Clear the user's session and send them back to the login page."""
    session.pop('user_id', None)
    flash('Goodbye! See you next time!')
    return redirect(url_for('login'))
if __name__=="__main__":
    # Development server only; run under a WSGI server in production.
    app.run(debug=True)
| 30.282759
| 103
| 0.700752
|
4a08885674ec3eb7bc8bca715ccf00a95095e41a
| 2,012
|
py
|
Python
|
lib/surface/service_directory/namespaces/list.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/service_directory/namespaces/list.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/service_directory/namespaces/list.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud service-directory namespaces list` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.service_directory import namespaces
from googlecloudsdk.api_lib.util import common_args
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.service_directory import resource_args
@base.ReleaseTracks(base.ReleaseTrack.GA)
class List(base.ListCommand):
  """Lists namespaces."""

  detailed_help = {
      'EXAMPLES':
          """\
          To list Service Directory namespaces, run:

            $ {command} --location=us-east1
          """,
  }

  @staticmethod
  def Args(parser):
    # Location is required but passed as a flag, not positionally; the
    # list surface paginates itself, so the generic limit/uri flags are
    # removed from the parser.
    resource_args.AddLocationResourceArg(parser, 'to list.', positional=False)
    base.LIMIT_FLAG.RemoveFromParser(parser)
    base.URI_FLAG.RemoveFromParser(parser)

  def Run(self, args):
    # Delegate to the API client for the track-specific API version.
    client = namespaces.NamespacesClient(self.GetReleaseTrack())
    location_ref = args.CONCEPTS.location.Parse()
    order_by = common_args.ParseSortByArg(args.sort_by)
    return client.List(location_ref, args.filter, order_by, args.page_size)

  def GetReleaseTrack(self):
    # Overridden by ListBeta to select the beta API surface.
    return base.ReleaseTrack.GA
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class ListBeta(List):
  """Lists namespaces."""

  def GetReleaseTrack(self):
    # Both alpha and beta invocations use the beta API surface.
    return base.ReleaseTrack.BETA
| 31.936508
| 78
| 0.750497
|
4a08885971e53a9aa778bf828597c31f4d369715
| 1,338
|
py
|
Python
|
app.py
|
vmagelo/msdocs-python-flask-webapp-quickstart
|
f26c73a610a36f34814ea24f9754d2a410da34c8
|
[
"MIT"
] | null | null | null |
app.py
|
vmagelo/msdocs-python-flask-webapp-quickstart
|
f26c73a610a36f34814ea24f9754d2a410da34c8
|
[
"MIT"
] | null | null | null |
app.py
|
vmagelo/msdocs-python-flask-webapp-quickstart
|
f26c73a610a36f34814ea24f9754d2a410da34c8
|
[
"MIT"
] | null | null | null |
from azure.identity import DefaultAzureCredential
from azure.storage.blob import ContainerClient, __version__
from datetime import datetime
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
@app.route('/')
def index():
    # Static landing page.
    print('Request for index page received')
    return render_template('index.html')
@app.route('/hello', methods=['POST'])
def hello():
    """Greet the submitted name; bounce back to the index when it is missing."""
    name = request.form.get('name')
    if not name:
        print('Request for hello page received with no name or blank name -- redirecting')
        return redirect(url_for('index'))
    print('Request for hello page received with name=%s' % name)
    return render_template('hello.html', name = name)
@app.route('/list', methods=['POST'])
def list():  # NOTE: shadows the builtin, but the name is the route endpoint — kept.
    """Render the blob names of the demo container, or 'error' when listing fails."""
    storage_url = 'https://vmagelopythonflask.blob.core.windows.net/'
    container_name = 'blob-container-01'
    credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True)
    container_client = ContainerClient(account_url=storage_url, container_name=container_name, credential=credential)
    try:
        # Single join instead of quadratic += in a loop; keeps the original
        # name-plus-trailing-space format the template expects.
        bloblist = ''.join(blob.name + ' ' for blob in container_client.list_blobs())
    except Exception as ex:
        # Log the cause instead of swallowing it silently like the original.
        print('Failed to list blobs: %s' % ex)
        bloblist = 'error'
    return render_template('list.html', list=bloblist)
if __name__ == '__main__':
app.run()
| 29.733333
| 116
| 0.713752
|
4a0888ca1dcb60ceaf30443df208b9875429d04f
| 7,675
|
py
|
Python
|
adb/systrace/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps.py
|
mohanedmoh/TBS
|
6aebf52643911fe0dce7d02825eb0f046da1b3b1
|
[
"Apache-2.0"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
adb/systrace/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps.py
|
mohanedmoh/TBS
|
6aebf52643911fe0dce7d02825eb0f046da1b3b1
|
[
"Apache-2.0"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
adb/systrace/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps.py
|
mohanedmoh/TBS
|
6aebf52643911fe0dce7d02825eb0f046da1b3b1
|
[
"Apache-2.0"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from py_vulcanize import js_utils
from py_vulcanize import module
from py_vulcanize import strip_js_comments
from py_vulcanize import html_generation_controller
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
def _InitBeautifulSoup():
  # Make the vendored copies of bs4, html5lib and six under
  # catapult/third_party importable before the 'import bs4' below runs.
  catapult_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__),
                   os.path.pardir, os.path.pardir, os.path.pardir))
  bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4')
  _AddToPathIfNeeded(bs_path)
  html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-python')
  _AddToPathIfNeeded(html5lib_path)
  six_path = os.path.join(catapult_path, 'third_party', 'six')
  _AddToPathIfNeeded(six_path)
# Must run at import time, before the bs4 import below.
_InitBeautifulSoup()
import bs4
class Script(object):
  """Base wrapper for a <script> element found in a module's HTML."""

  def __init__(self, soup):
    """Store the parsed element; *soup* must be truthy."""
    if soup:
      self._soup = soup
    else:
      raise module.DepsException('Script object created without soup')

  def AppendJSContentsToFile(self, f, *args, **kwargs):
    """Subclasses write their JavaScript into the file object *f*."""
    raise NotImplementedError()
class InlineScript(Script):
  """A <script> block whose JavaScript is inline in the HTML.

  NOTE(review): this file uses the Python 2 `unicode` builtin — it is not
  Python 3 compatible as written.
  """
  def __init__(self, soup):
    super(InlineScript, self).__init__(soup)
    self._stripped_contents = None  # lazy cache for stripped_contents
    self._open_tags = None  # lazy cache for open_tags
    self.is_external = False
  @property
  def contents(self):
    # The raw JS text of the script element.
    return unicode(self._soup.string)
  @property
  def stripped_contents(self):
    # Contents with JS comments removed; computed once and cached.
    if not self._stripped_contents:
      self._stripped_contents = strip_js_comments.StripJSComments(
          self.contents)
    return self._stripped_contents
  @property
  def open_tags(self):
    # Ancestor tags enclosing this script, outermost first, excluding the
    # <script> element itself; computed once and cached.
    if self._open_tags:
      return self._open_tags
    open_tags = []
    cur = self._soup.parent
    while cur:
      if isinstance(cur, bs4.BeautifulSoup):
        break
      open_tags.append(_Tag(cur.name, cur.attrs))
      cur = cur.parent
    open_tags.reverse()
    assert open_tags[-1].tag == 'script'
    del open_tags[-1]
    self._open_tags = open_tags
    return self._open_tags
  def AppendJSContentsToFile(self, f, *args, **kwargs):
    # Emit the inline JS, escaping any embedded </script> sequences.
    js = self.contents
    escaped_js = js_utils.EscapeJSIfNeeded(js)
    f.write(escaped_js)
    f.write('\n')
class ExternalScript(Script):
  """A <script src=...> element whose JS is loaded from another file."""
  def __init__(self, soup):
    super(ExternalScript, self).__init__(soup)
    if 'src' not in soup.attrs:
      raise Exception("{0} is not an external script.".format(soup))
    self.is_external = True
    self._loaded_raw_script = None  # set later by the loader via the setter
  @property
  def loaded_raw_script(self):
    # None until the loader assigns the fetched script object.
    if self._loaded_raw_script:
      return self._loaded_raw_script
    return None
  @loaded_raw_script.setter
  def loaded_raw_script(self, value):
    self._loaded_raw_script = value
  @property
  def src(self):
    # The value of the element's src attribute.
    return self._soup.attrs['src']
  def AppendJSContentsToFile(self,
                             f,
                             use_include_tags_for_scripts,
                             dir_for_include_tag_root):
    # Emit either an <include src=...> reference (relative to the given
    # root) or the escaped JS itself; silently no-op when never loaded.
    raw_script = self.loaded_raw_script
    if not raw_script:
      return
    if use_include_tags_for_scripts:
      rel_filename = os.path.relpath(raw_script.filename,
                                     dir_for_include_tag_root)
      f.write("""<include src="%s">\n""" % rel_filename)
    else:
      f.write(js_utils.EscapeJSIfNeeded(raw_script.contents))
    f.write('\n')
def _CreateSoupWithoutHeadOrBody(html):
  # Re-parse *html* and return a new soup whose children are the former
  # head and body contents hoisted to the top level (html5lib always
  # synthesizes <html><head><body> wrappers; this strips them).
  # NOTE(review): extracting nodes while iterating .contents mutates the
  # list being iterated — confirm no children are skipped for multi-child
  # heads/bodies.
  soupCopy = bs4.BeautifulSoup(html, 'html5lib')
  soup = bs4.BeautifulSoup()
  soup.reset()
  if soupCopy.head:
    for n in soupCopy.head.contents:
      n.extract()
      soup.append(n)
  if soupCopy.body:
    for n in soupCopy.body.contents:
      n.extract()
      soup.append(n)
  return soup
class HTMLModuleParserResults(object):
  """Parsed view of one HTML module: its scripts, imports and stylesheets."""
  def __init__(self, html):
    self._soup = bs4.BeautifulSoup(html, 'html5lib')
    self._inline_scripts = None  # lazy cache
    self._scripts = None  # lazy cache
  @property
  def scripts_external(self):
    # src values of all <script src=...> elements.
    tags = self._soup.findAll('script', src=True)
    return [t['src'] for t in tags]
  @property
  def inline_scripts(self):
    # NOTE(review): this wraps t.string (the text node), while the
    # `scripts` property below wraps the <script> tag itself — confirm
    # whether both call sites expect the same element type.
    if not self._inline_scripts:
      tags = self._soup.findAll('script', src=None)
      self._inline_scripts = [InlineScript(t.string) for t in tags]
    return self._inline_scripts
  @property
  def scripts(self):
    # All scripts in document order, external or inline.
    if not self._scripts:
      self._scripts = []
      script_elements = self._soup.findAll('script')
      for element in script_elements:
        if 'src' in element.attrs:
          self._scripts.append(ExternalScript(element))
        else:
          self._scripts.append(InlineScript(element))
    return self._scripts
  @property
  def imports(self):
    # href values of all HTML imports (<link rel="import">).
    tags = self._soup.findAll('link', rel='import')
    return [t['href'] for t in tags]
  @property
  def stylesheets(self):
    # href values of all external stylesheets.
    tags = self._soup.findAll('link', rel='stylesheet')
    return [t['href'] for t in tags]
  @property
  def inline_stylesheets(self):
    # Text contents of all <style> blocks.
    tags = self._soup.findAll('style')
    return [unicode(t.string) for t in tags]
  def YieldHTMLInPieces(self, controller, minify=False):
    # Generator wrapper around GenerateHTML for streaming callers.
    yield self.GenerateHTML(controller, minify)
  def GenerateHTML(self, controller, minify=False, prettify=False):
    # Produce the module HTML with scripts/imports stripped and styles
    # rewritten through *controller*.
    soup = _CreateSoupWithoutHeadOrBody(unicode(self._soup))
    # Remove the <!DOCTYPE ...> declaration.
    for x in soup.contents:
      if isinstance(x, bs4.Doctype):
        x.extract()
    # Remove any <?xml ...?>-style declaration.
    for x in soup.contents:
      if isinstance(x, bs4.Declaration):
        x.extract()
    # Remove all imports.
    imports = soup.findAll('link', rel='import')
    for imp in imports:
      imp.extract()
    # Remove all script links.
    scripts_external = soup.findAll('script', src=True)
    for script in scripts_external:
      script.extract()
    # Remove all in-line scripts.
    scripts_external = soup.findAll('script', src=None)
    for script in scripts_external:
      script.extract()
    # Process all in-line styles: the controller may rewrite or drop them.
    inline_styles = soup.findAll('style')
    for style in inline_styles:
      html = controller.GetHTMLForInlineStylesheet(unicode(style.string))
      if html:
        ns = soup.new_tag('style')
        ns.append(bs4.NavigableString(html))
        style.replaceWith(ns)
      else:
        style.extract()
    # Rewrite all external stylesheet hrefs or remove, as needed.
    stylesheet_links = soup.findAll('link', rel='stylesheet')
    for stylesheet_link in stylesheet_links:
      html = controller.GetHTMLForStylesheetHRef(stylesheet_link['href'])
      if html:
        tmp = bs4.BeautifulSoup(html, 'html5lib').findAll('style')
        assert len(tmp) == 1
        stylesheet_link.replaceWith(tmp[0])
      else:
        stylesheet_link.extract()
    # Remove comments if minifying.
    if minify:
      comments = soup.findAll(
          text=lambda text: isinstance(text, bs4.Comment))
      for comment in comments:
        comment.extract()
    if prettify:
      return soup.prettify('utf-8').strip()
    # We are done.
    return unicode(soup).strip()
  @property
  def html_contents_without_links_and_script(self):
    # Convenience form of GenerateHTML with the default (no-op) controller.
    return self.GenerateHTML(
        html_generation_controller.HTMLGenerationController())
class _Tag(object):
def __init__(self, tag, attrs):
self.tag = tag
self.attrs = attrs
def __repr__(self):
attr_string = ' '.join('%s="%s"' % (x[0], x[1]) for x in self.attrs)
return '<%s %s>' % (self.tag, attr_string)
class HTMLModuleParser():
  """Entry point: parse module HTML into HTMLModuleParserResults."""

  def Parse(self, html):
    """Parse *html*; None is treated as an empty document."""
    if html is None:
      return HTMLModuleParserResults('')
    if html.find('< /script>') != -1:
      raise Exception('Escape script tags with <\/script>')
    return HTMLModuleParserResults(html)
| 27.024648
| 79
| 0.672573
|
4a08899892afc289491742601097c20771e21dc0
| 1,035
|
py
|
Python
|
bootcamp/confgenerator/urls.py
|
davismathew/netbot-django
|
5a46368ba7c16790e1b96292eecfde6f8f35d2e5
|
[
"MIT"
] | null | null | null |
bootcamp/confgenerator/urls.py
|
davismathew/netbot-django
|
5a46368ba7c16790e1b96292eecfde6f8f35d2e5
|
[
"MIT"
] | null | null | null |
bootcamp/confgenerator/urls.py
|
davismathew/netbot-django
|
5a46368ba7c16790e1b96292eecfde6f8f35d2e5
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from django.conf.urls import patterns, include, url
from django.views.decorators.csrf import csrf_exempt
from bootcamp.confgenerator import views
# URL routes for the confgenerator app. NOTE(review): the `patterns` and
# `include` names imported above are unused here (`patterns` was removed in
# Django 1.10) — confirm and drop them on the next cleanup pass.
urlpatterns = [
    url(r'^$', views.listconf, name='listconf'),
    # POST endpoints are csrf-exempt because they are called by non-browser clients.
    url(r'^confinstance/$', csrf_exempt(views.createconfinstance), name='confinstance'),
    url(r'^createconf/$', views.createconftemplate, name='createconf'),
    url(r'^downloadtemplateout/(?P<id>\d+)/$', csrf_exempt(views.downloadtemplateout), name='downloadtemplateout'),
    # url(r'^fetchipamcheck/$', views.fetchipamcheck, name='fetchipamcheck'),
    # url(r'^runipamcheck/$', csrf_exempt(views.runipamcheck), name='runipamcheck'),
    # url(r'^preview/$', views.preview, name='preview'),
    # url(r'^drafts/$', views.drafts, name='drafts'),
    # url(r'^comment/$', views.comment, name='comment'),
    # url(r'^tag/(?P<tag_name>.+)/$', views.tag, name='tag'),
    # url(r'^edit/(?P<id>\d+)/$', views.edit, name='edit_article'),
    # url(r'^(?P<slug>[-\w]+)/$', views.article, name='article'),
]
| 49.285714
| 115
| 0.667633
|
4a088b1eb483af140636280fc5a0a4e9d164f968
| 579
|
py
|
Python
|
48-all-lines-all-files/all_lines_all_files.py
|
TonyJenkins/python-workout
|
d2bae778273a99f472a07812e260f849029f5549
|
[
"Unlicense"
] | null | null | null |
48-all-lines-all-files/all_lines_all_files.py
|
TonyJenkins/python-workout
|
d2bae778273a99f472a07812e260f849029f5549
|
[
"Unlicense"
] | null | null | null |
48-all-lines-all-files/all_lines_all_files.py
|
TonyJenkins/python-workout
|
d2bae778273a99f472a07812e260f849029f5549
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
"""
Exercise 48: All lines, all files
Generator to provide an iterator over all the files in a folder.
"""
from os import listdir
from os.path import join as path_join, isfile
def all_lines_all_files(path_to_folder):
    """Yield every line, newline stripped, from every regular file in *path_to_folder*.

    Files are visited in os.listdir order; subdirectories are skipped.
    """
    for filename in listdir(path_to_folder):
        full_filename = path_join(path_to_folder, filename)
        if not isfile(full_filename):
            continue
        # Context manager closes each file deterministically (the original
        # left closing to the GC), and rstrip('\n') fixes the original
        # [:-1] slice, which chopped the final character off a last line
        # that had no trailing newline.
        with open(full_filename) as handle:
            for each_line in handle:
                yield each_line.rstrip('\n')
if __name__ == '__main__':
    # Demo: dump every line of every file in the sibling exercise folder.
    for line in all_lines_all_files('../45-zoo'):
        print(line)
| 23.16
| 68
| 0.675302
|
4a088b5e35022891b4165eb5831b47d1f89daabc
| 14,451
|
py
|
Python
|
src/odict/pyodict.py
|
vishalbelsare/odict
|
4647ef57808d479ed49bb7ef8263f6e6ca9de4f5
|
[
"PSF-2.0"
] | null | null | null |
src/odict/pyodict.py
|
vishalbelsare/odict
|
4647ef57808d479ed49bb7ef8263f6e6ca9de4f5
|
[
"PSF-2.0"
] | null | null | null |
src/odict/pyodict.py
|
vishalbelsare/odict
|
4647ef57808d479ed49bb7ef8263f6e6ca9de4f5
|
[
"PSF-2.0"
] | null | null | null |
# Python Software Foundation License
import copy
import functools
import sys
# dict.iteritems() existed only on Python 2; pick the right method name once.
ITER_FUNC = 'iteritems' if sys.version_info[0] < 3 else 'items'
class _Nil(object):
"""Q: it feels like using the class with "is" and "is not" instead of
"==" and "!=" should be faster.
A: This would break implementations which use pickle for persisting.
"""
def __repr__(self):
return "nil"
def __eq__(self, other):
if (isinstance(other, _Nil)):
return True
else:
return NotImplemented
def __ne__(self, other):
if (isinstance(other, _Nil)):
return False
else:
return NotImplemented
def __hash__(self):
return sys.maxsize
# Shared sentinel instance; compared with == (see _Nil) so unpickled copies match.
_nil = _Nil()
class _odict(object):
    """Ordered dict data structure, with O(1) complexity for dict operations
    that modify one element.
    Overwriting values doesn't change their original sequential order.
    Each stored value is a 3-item list ``[pred_key, value, succ_key]`` — a
    doubly-linked list threaded through the backing dict, with ``lh``/``lt``
    holding the head and tail keys.
    """
    def _dict_impl(self):
        # Subclasses return the concrete dict class used as backing storage.
        return None
    def _list_factory(self):
        # Factory for the 3-item node lists; overridable by subclasses.
        return list
    def __init__(self, data=(), **kwds):
        """This doesn't accept keyword initialization as normal dicts to avoid
        a trap - inside a function or method the keyword args are accessible
        only as a dict, without a defined order, so their original order is
        lost.
        """
        if kwds:
            raise TypeError("__init__() of ordered dict takes no keyword "
                            "arguments to avoid an ordering trap.")
        dict_ = self._dict_impl()
        if dict_ is None:
            raise TypeError("No dict implementation class provided.")
        dict_.__init__(self)
        # If you give a normal dict, then the order of elements is undefined
        if hasattr(data, ITER_FUNC):
            for key, val in getattr(data, ITER_FUNC)():
                self[key] = val
        else:
            for key, val in data:
                self[key] = val
    # Double-linked list header
    @property
    def lh(self):
        # Key of the first element; lazily initialized to the _nil sentinel.
        dict_ = self._dict_impl()
        if not hasattr(self, '_lh'):
            dict_.__setattr__(self, '_lh', _nil)
        return dict_.__getattribute__(self, '_lh')
    @lh.setter
    def lh(self, val):
        self._dict_impl().__setattr__(self, '_lh', val)
    # Double-linked list tail
    @property
    def lt(self):
        # Key of the last element; lazily initialized to the _nil sentinel.
        dict_ = self._dict_impl()
        if not hasattr(self, '_lt'):
            dict_.__setattr__(self, '_lt', _nil)
        return dict_.__getattribute__(self, '_lt')
    @lt.setter
    def lt(self, val):
        self._dict_impl().__setattr__(self, '_lt', val)
    def __getitem__(self, key):
        # Node layout is [pred, value, succ]; index 1 is the value.
        return self._dict_impl().__getitem__(self, key)[1]
    def __setitem__(self, key, val):
        dict_ = self._dict_impl()
        try:
            # Existing key: overwrite the value in place, order unchanged.
            dict_.__getitem__(self, key)[1] = val
        except KeyError:
            # New key: append a node at the tail of the linked list.
            list_ = self._list_factory()
            lt = dict_.__getattribute__(self, 'lt')
            new = list_([lt, val, _nil])
            dict_.__setitem__(self, key, new)
            if lt == _nil:
                # List was empty: new key becomes the head too.
                dict_.__setattr__(self, 'lh', key)
            else:
                dict_.__getitem__(self, lt)[2] = key
            dict_.__setattr__(self, 'lt', key)
    def __delitem__(self, key):
        # Unlink the node from its neighbors, fixing head/tail as needed.
        dict_ = self._dict_impl()
        pred, _, succ = dict_.__getitem__(self, key)
        if pred == _nil:
            dict_.__setattr__(self, 'lh', succ)
        else:
            dict_.__getitem__(self, pred)[2] = succ
        if succ == _nil:
            dict_.__setattr__(self, 'lt', pred)
        else:
            dict_.__getitem__(self, succ)[0] = pred
        dict_.__delitem__(self, key)
    def __copy__(self):
        # Shallow copy preserving insertion order and instance attributes.
        new = type(self)()
        for k, v in self.iteritems():
            new[k] = v
        new.__dict__.update(self.__dict__)
        return new
    def __deepcopy__(self, memo):
        new = type(self)()
        memo[id(self)] = new
        for k, v in self.iteritems():
            new[k] = copy.deepcopy(v, memo)
        for k, v in getattr(self.__dict__, ITER_FUNC)():
            setattr(new, k, copy.deepcopy(v, memo))
        return new
    def __contains__(self, key):
        # EAFP membership test via __getitem__.
        try:
            self[key]
            return True
        except KeyError:
            return False
    def has_key(self, key):
        # Python 2 compatibility alias for __contains__.
        return key in self
    def __len__(self):
        return len(self.keys())
    def __str__(self):
        pairs = ("%r: %r" % (k, v) for k, v in getattr(self, ITER_FUNC)())
        return "{%s}" % ", ".join(pairs)
    def __repr__(self):
        if self:
            pairs = (
                "(%r, %r)" % (k, v) for k, v in getattr(self, ITER_FUNC)()
            )
            return "%s([%s])" % (self.__class__.__name__, ", ".join(pairs))
        else:
            return "%s()" % self.__class__.__name__
    def get(self, k, x=None):
        # dict.get semantics; double lookup avoids raising.
        if k in self:
            return self._dict_impl().__getitem__(self, k)[1]
        else:
            return x
    def __iter__(self):
        # Walk the linked list from head to tail, yielding keys in order.
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lh')
        while curr_key != _nil:
            yield curr_key
            curr_key = dict_.__getitem__(self, curr_key)[2]
    iterkeys = __iter__
    def keys(self):
        return list(self.iterkeys())
    def alter_key(self, old_key, new_key):
        # Rename a key in place, keeping its value and list position:
        # re-point the neighbors' links (or head/tail) at the new key.
        dict_ = self._dict_impl()
        list_ = self._list_factory()
        val = dict_.__getitem__(self, old_key)
        dict_.__delitem__(self, old_key)
        if val[0] != _nil:
            prev = dict_.__getitem__(self, val[0])
            dict_.__setitem__(self, val[0], list_([prev[0], prev[1], new_key]))
        else:
            dict_.__setattr__(self, 'lh', new_key)
        if val[2] != _nil:
            next = dict_.__getitem__(self, val[2])
            dict_.__setitem__(self, val[2], list_([new_key, next[1], next[2]]))
        else:
            dict_.__setattr__(self, 'lt', new_key)
        dict_.__setitem__(self, new_key, val)
    def itervalues(self):
        # Values in insertion order; node unpacking advances curr_key to succ.
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lh')
        while curr_key != _nil:
            _, val, curr_key = dict_.__getitem__(self, curr_key)
            yield val
    def values(self):
        return list(self.itervalues())
    def iteritems(self):
        # (key, value) pairs in insertion order.
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lh')
        while curr_key != _nil:
            _, val, next_key = dict_.__getitem__(self, curr_key)
            yield curr_key, val
            curr_key = next_key
    def items(self):
        return list(self.iteritems())
    def sort(self, cmp=None, key=None, reverse=False):
        # Re-insert all items in sorted order. A legacy `cmp` function is
        # adapted via cmp_to_key; with no cmp/key, sorts by value (x[1]).
        items = [(k, v) for k, v in self.iteritems()]
        if cmp is not None:
            key = functools.cmp_to_key(cmp)
        if key is not None:
            items = sorted(items, key=key)
        else:
            items = sorted(items, key=lambda x: x[1])
        if reverse:
            items.reverse()
        self.clear()
        self.__init__(items)
    def clear(self):
        # Empty the backing dict and reset both linked-list ends.
        dict_ = self._dict_impl()
        dict_.clear(self)
        dict_.__setattr__(self, 'lh', _nil)
        dict_.__setattr__(self, 'lt', _nil)
    def copy(self):
        return self.__class__(self)
    def update(self, data=(), **kwds):
        # Same keyword ban as __init__, and for the same ordering reason.
        if kwds:
            raise TypeError(
                "update() of ordered dict takes no keyword arguments to avoid "
                "an ordering trap."
            )
        if hasattr(data, ITER_FUNC):
            data = getattr(data, ITER_FUNC)()
        for key, val in data:
            self[key] = val
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def pop(self, key, default=_nil):
        # _nil default distinguishes "no default supplied" from None.
        try:
            val = self[key]
            del self[key]
            return val
        except KeyError:
            if default == _nil:
                raise
            return default
    def popitem(self):
        # Remove and return the LAST (key, value) pair (tail of the list).
        try:
            dict_ = self._dict_impl()
            key = dict_.__getattribute__(self, 'lt')
            return key, self.pop(key)
        except KeyError:
            raise KeyError("'popitem(): ordered dictionary is empty'")
    def riterkeys(self):
        """To iterate on keys in reversed order.
        """
        # Walk backwards from the tail via each node's pred link.
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lt')
        while curr_key != _nil:
            yield curr_key
            curr_key = dict_.__getitem__(self, curr_key)[0]
    __reversed__ = riterkeys
    def rkeys(self):
        """List of the keys in reversed order.
        """
        return list(self.riterkeys())
    def ritervalues(self):
        """To iterate on values in reversed order.
        """
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lt')
        while curr_key != _nil:
            # Unpacking assigns the node's pred link to curr_key, stepping back.
            curr_key, val, _ = dict_.__getitem__(self, curr_key)
            yield val
    def rvalues(self):
        """List of the values in reversed order.
        """
        return list(self.ritervalues())
    def riteritems(self):
        """To iterate on (key, value) in reversed order.
        """
        dict_ = self._dict_impl()
        curr_key = dict_.__getattribute__(self, 'lt')
        while curr_key != _nil:
            pred_key, val, _ = dict_.__getitem__(self, curr_key)
            yield curr_key, val
            curr_key = pred_key
    def ritems(self):
        """List of the (key, value) in reversed order.
        """
        return list(self.riteritems())
    def firstkey(self):
        # Head key of the list; raises on an empty dict.
        if self:
            return self._dict_impl().__getattribute__(self, 'lh')
        else:
            raise KeyError('Ordered dictionary is empty')
    @property
    def first_key(self):
        return self.firstkey()
    def lastkey(self):
        # Tail key of the list; raises on an empty dict.
        if self:
            return self._dict_impl().__getattribute__(self, 'lt')
        else:
            raise KeyError('Ordered dictionary is empty')
    @property
    def last_key(self):
        return self.lastkey()
    def as_dict(self):
        # Plain (unordered) dict snapshot of the current items.
        return self._dict_impl()(self.iteritems())
    def _repr(self):
        """_repr(): low level repr of the whole data contained in the odict.
        Useful for debugging.
        """
        dict_ = self._dict_impl()
        form = "odict low level repr lh,lt,data: %r, %r, %s"
        return form % (
            dict_.__getattribute__(self, 'lh'),
            dict_.__getattribute__(self, 'lt'),
            dict_.__repr__(self)
        )
def swap(self, a, b):
    """Exchange the positions of keys a and b in the insertion order.

    Values stay attached to their own keys; only the linked-list
    neighbour pointers (and the head/tail sentinels) are rewired.
    Raises ValueError when a == b.
    """
    if a == b:
        raise ValueError('Swap keys are equal')
    dict_ = self._dict_impl()
    list_ = self._list_factory()
    # Stored entries are triples [predecessor_key, value, successor_key].
    orgin_a = dict_.__getitem__(self, a)
    orgin_b = dict_.__getitem__(self, b)
    # Each key keeps its own value but adopts the other's neighbours.
    new_a = list_([orgin_b[0], orgin_a[1], orgin_b[2]])
    new_b = list_([orgin_a[0], orgin_b[1], orgin_a[2]])
    # Fix self-references that appear when a and b are adjacent.
    if new_a[0] == a:
        new_a[0] = b
        new_b[2] = a
    if new_b[0] == b:
        new_b[0] = a
        new_a[2] = b
    # Relink the surrounding neighbours to point at the swapped keys.
    if new_a[0] != _nil:
        dict_.__getitem__(self, new_a[0])[2] = a
    if new_a[2] != _nil:
        dict_.__getitem__(self, new_a[2])[0] = a
    if new_b[0] != _nil:
        dict_.__getitem__(self, new_b[0])[2] = b
    if new_b[2] != _nil:
        dict_.__getitem__(self, new_b[2])[0] = b
    dict_.__setitem__(self, a, new_a)
    dict_.__setitem__(self, b, new_b)
    # Update the head/tail sentinels when an end position was involved.
    if new_a[0] == _nil:
        dict_.__setattr__(self, 'lh', a)
    if new_a[2] == _nil:
        dict_.__setattr__(self, 'lt', a)
    if new_b[0] == _nil:
        dict_.__setattr__(self, 'lh', b)
    if new_b[2] == _nil:
        dict_.__setattr__(self, 'lt', b)
def insertbefore(self, ref, key, value):
    """Insert (key, value) immediately before existing key `ref`.

    Raises ValueError when ref == key and KeyError when ref is absent.
    NOTE(review): `self.keys().index(ref)` makes this O(n) per insert.
    """
    if ref == key:
        raise ValueError('Reference key and new key are equal')
    try:
        index = self.keys().index(ref)
    except ValueError:
        raise KeyError('Reference key \'{}\' not found'.format(ref))
    prevkey = prevval = None
    dict_ = self._dict_impl()
    list_ = self._list_factory()
    if index > 0:
        prevkey = self.keys()[index - 1]
        prevval = dict_.__getitem__(self, prevkey)
    if prevval is not None:
        # Link the predecessor forward to the new key.
        dict_.__getitem__(self, prevkey)[2] = key
        newval = list_([prevkey, value, ref])
    else:
        # ref was the head: the new key becomes the new head.
        dict_.__setattr__(self, 'lh', key)
        newval = list_([_nil, value, ref])
    # Link ref backward to the new key and store the new entry.
    dict_.__getitem__(self, ref)[0] = key
    dict_.__setitem__(self, key, newval)
def insertafter(self, ref, key, value):
    """Insert (key, value) immediately after existing key `ref`.

    Raises ValueError when ref == key and KeyError when ref is absent.
    NOTE(review): `self.keys().index(ref)` makes this O(n) per insert.
    """
    if ref == key:
        raise ValueError('Reference key and new key are equal')
    try:
        index = self.keys().index(ref)
    except ValueError:
        raise KeyError('Reference key \'{}\' not found'.format(ref))
    nextkey = nextval = None
    keys = self.keys()
    dict_ = self._dict_impl()
    list_ = self._list_factory()
    if index < len(keys) - 1:
        nextkey = keys[index + 1]
        nextval = dict_.__getitem__(self, nextkey)
    if nextval is not None:
        # Link the successor backward to the new key.
        dict_.__getitem__(self, nextkey)[0] = key
        newval = list_([ref, value, nextkey])
    else:
        # ref was the tail: the new key becomes the new tail.
        dict_.__setattr__(self, 'lt', key)
        newval = list_([ref, value, _nil])
    # Link ref forward to the new key and store the new entry.
    dict_.__getitem__(self, ref)[2] = key
    dict_.__setitem__(self, key, newval)
def insertfirst(self, key, value):
    """Insert (key, value) at the head of the order."""
    keys = self.keys()
    if not keys:
        # Empty dict: a plain assignment creates the only item.
        self[key] = value
        return
    self.insertbefore(keys[0], key, value)
def insertlast(self, key, value):
    """Insert (key, value) at the tail of the order."""
    keys = self.keys()
    if not keys:
        # Empty dict: a plain assignment creates the only item.
        self[key] = value
        return
    self.insertafter(keys[-1], key, value)
def next_key(self, key):
    """Return the key that follows `key` in insertion order.

    Raises KeyError when `key` is the last key (or missing).
    """
    dict_ = self._dict_impl()
    curr = dict_.__getitem__(self, key)
    # curr is [predecessor, value, successor]; _nil marks the tail.
    if curr[2] == _nil:
        raise KeyError('No next key')
    return curr[2]
def prev_key(self, key):
    """Return the key that precedes `key` in insertion order.

    Raises KeyError when `key` is the first key (or missing).
    """
    dict_ = self._dict_impl()
    curr = dict_.__getitem__(self, key)
    # _nil as predecessor marks the head.
    if curr[0] == _nil:
        raise KeyError('No previous key')
    return curr[0]
class odict(_odict, dict):
    """Ordered dictionary built on the plain `dict` implementation."""
    def _dict_impl(self):
        # Tells the _odict machinery which mapping type stores the
        # [predecessor, value, successor] triples.
        return dict
| 30.359244
| 79
| 0.546052
|
4a088d350a9f58feb7b6bde7c0be17c5439a751c
| 5,764
|
py
|
Python
|
feeders/feeder.py
|
Uason-Chen/SGP-JCA
|
4ea9d4c7b049fe729ea98c86263ba208871beaf1
|
[
"MIT"
] | 3
|
2020-12-28T05:49:14.000Z
|
2021-07-28T07:41:51.000Z
|
feeders/feeder.py
|
Uason-Chen/SGP-JCA
|
4ea9d4c7b049fe729ea98c86263ba208871beaf1
|
[
"MIT"
] | null | null | null |
feeders/feeder.py
|
Uason-Chen/SGP-JCA
|
4ea9d4c7b049fe729ea98c86263ba208871beaf1
|
[
"MIT"
] | 1
|
2022-02-22T10:03:17.000Z
|
2022-02-22T10:03:17.000Z
|
# sys
import os
import sys
import numpy as np
import random
import pickle
# torch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
# visualization
import time
# operation
from . import tools
class Feeder(torch.utils.data.Dataset):
    """ Feeder for skeleton-based action recognition
    Arguments:
        data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
        label_path: the path to label (.pkl pair of names/labels or legacy .npy)
        random_choose: If true, randomly choose a portion of the input sequence
        random_shift: If true, randomly pad zeros at the begining or end of sequence
        random_move: If true, apply a random affine perturbation (see tools.random_move)
        window_size: The length of the output sequence
        normalization: If true, normalize input sequence
        debug: If true, only use the first 100 samples
        mmap: If true, memory-map the data file instead of loading it into RAM
        vel: If true, replace coordinates with frame-to-frame differences
    """
    def __init__(self,
                 data_path,
                 label_path,
                 random_choose=False,
                 random_shift=False,
                 random_move=False,
                 window_size=-1,
                 normalization=False,
                 debug=False,
                 mmap=False,
                 vel=False):
        self.debug = debug
        self.data_path = data_path
        self.label_path = label_path
        self.random_choose = random_choose
        self.random_shift = random_shift
        self.random_move = random_move
        self.window_size = window_size
        self.normalization = normalization
        self.vel = vel
        self.load_data(mmap)
        if normalization:
            self.get_mean_map()

    def load_data(self, mmap):
        """Load labels and data; data layout is (N, C, T, V, M)."""
        # data: N C V T M
        # load label
        if '.pkl' in self.label_path:
            try:
                with open(self.label_path) as f:
                    self.sample_name, self.label = pickle.load(f)
            # NOTE(review): bare except — any failure (not only python2
            # pickles) falls through to the latin1 retry below.
            except:
                # for pickle file from python2
                with open(self.label_path, 'rb') as f:
                    self.sample_name, self.label = pickle.load(
                        f, encoding='latin1')
        # old label format
        elif '.npy' in self.label_path:
            self.label = list(np.load(self.label_path))
            self.sample_name = [str(i) for i in range(len(self.label))]
        else:
            raise ValueError()
        # load data
        if mmap:
            self.data = np.load(self.data_path, mmap_mode='r')
        else:
            self.data = np.load(self.data_path)
        if self.debug:
            # Keep only the first 100 samples for quick iteration.
            self.label = self.label[0:100]
            self.data = self.data[0:100]
            self.sample_name = self.sample_name[0:100]
        self.N, self.C, self.T, self.V, self.M = self.data.shape

    def get_mean_map(self):
        """Precompute per-(channel, joint) mean/std maps for normalization."""
        data = self.data
        N, C, T, V, M = data.shape
        # Mean over frames (T), bodies (M) and samples (N) -> (C, 1, V, 1).
        self.mean_map = data.mean(
            axis=2, keepdims=True).mean(
                axis=4, keepdims=True).mean(axis=0)
        # Std over all samples/frames/bodies per (channel, joint).
        self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape(
            (N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))

    def __len__(self):
        return len(self.label)

    def __iter__(self):
        # NOTE(review): returns self without defining __next__, so direct
        # iteration over the dataset object would fail; samples are
        # normally consumed through a DataLoader instead.
        return self

    def __getitem__(self, index):
        """Return (data, label, index) for one sample, with augmentations applied."""
        # get data
        data_numpy = np.array(self.data[index])
        label = self.label[index]
        # normalization
        if self.normalization:
            data_numpy = (data_numpy - self.mean_map) / self.std_map
        # processing
        if self.random_shift:
            data_numpy = tools.random_shift(data_numpy)
        if self.random_choose:
            data_numpy = tools.random_choose(data_numpy, self.window_size)
        elif self.window_size > 0:
            data_numpy = tools.auto_pading(data_numpy, self.window_size)
        if self.random_move:
            data_numpy = tools.random_move(data_numpy)
        if self.vel:
            # First-order temporal difference along axis 1 (frames); the
            # last frame keeps its original coordinates.
            data_numpy[:, :-1] = data_numpy[:, 1:] - data_numpy[:, :-1]
        return data_numpy, label, index

    def top_k(self, score, top_k):
        """Fraction of samples whose true label is among the k highest scores."""
        rank = score.argsort()
        hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
        return sum(hit_top_k) * 1.0 / len(hit_top_k)

    def top_k_by_category(self, score, top_k):
        """Per-category top-k accuracy (delegates to tools)."""
        return tools.top_k_by_category(self.label, score, top_k)

    def calculate_recall_precision(self, score):
        """Recall/precision metrics (delegates to tools)."""
        return tools.calculate_recall_precision(self.label, score)
def test(data_path, label_path, vid=None):
    """Visualize one skeleton sample as an animated 2-D scatter plot.

    Parameters
    ----------
    data_path, label_path : str
        Dataset files handed to Feeder.
    vid : str
        Sample id to display (required; the original silently hit a
        NameError when it was omitted).
    """
    import matplotlib.pyplot as plt
    loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path, label_path),
        batch_size=64,
        shuffle=False,
        num_workers=2)
    if vid is None:
        # Fail loudly instead of reaching the plotting code with no data.
        raise ValueError('A sample id (vid) is required for visualization.')
    sample_name = loader.dataset.sample_name
    sample_id = [name.split('.')[0] for name in sample_name]
    index = sample_id.index(vid)
    # Fix: Feeder.__getitem__ returns (data, label, index); the original
    # two-value unpacking raised "too many values to unpack".
    data, label, _ = loader.dataset[index]
    data = data.reshape((1, ) + data.shape)

    # for batch_idx, (data, label) in enumerate(loader):
    N, C, T, V, M = data.shape
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    pose, = ax.plot(np.zeros(V * M), np.zeros(V * M), 'g^')
    ax.axis([-1, 1, -1, 1])
    for n in range(N):
        for t in range(T):
            # Only the x/y channels of the first body are drawn; the z
            # channel read by the original code was never used.
            x = data[n, 0, t, :, 0]
            y = data[n, 1, t, :, 0]
            pose.set_xdata(x)
            pose.set_ydata(y)
            fig.canvas.draw()
            plt.pause(1)
if __name__ == '__main__':
    # Manual smoke test: visualize one NTU RGB-D cross-view sample.
    data_path = "./data/NTU-RGB-D/xview/val_data.npy"
    label_path = "./data/NTU-RGB-D/xview/val_label.pkl"
    test(data_path, label_path, vid='S003C001P017R001A044')
| 31.326087
| 87
| 0.57304
|
4a088ed00893de6930e54a5e1c669bdc6a16e6a8
| 789
|
py
|
Python
|
thirdparty/Sumo/examples/rou/edit_vehicle.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | null | null | null |
thirdparty/Sumo/examples/rou/edit_vehicle.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | null | null | null |
thirdparty/Sumo/examples/rou/edit_vehicle.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | 1
|
2020-12-19T05:48:01.000Z
|
2020-12-19T05:48:01.000Z
|
from lxml import etree

# Rewrite a SUMO route file so every <vehicle> gets randomized departure
# and arrival positions and zero initial/final speed.
rou_xml = etree.parse('/home/yining/CARLA_0.9.9/Co-Simulation/Sumo/examples/rou/Town04.rou.xml')
root = rou_xml.getroot()
print(root.items())
print(root.keys())
print(root.xpath('//vehicle'))
vehicle_list = root.xpath('//vehicle')
print(len(vehicle_list))
# Target shape: departPos="random" departSpeed="0.00" arrivalPos="random" arrivalSpeed="0.00">
# Reuse the xpath result computed above instead of querying again.
for node in vehicle_list:
    # In lxml, node.set(...) and node.attrib[...] = ... are equivalent;
    # the original assigned departPos twice (once via each API).
    node.attrib["departPos"] = "random"
    node.attrib["departSpeed"] = "0.00"
    node.attrib["arrivalPos"] = "random"
    node.attrib["arrivalSpeed"] = "0.00"
tree = etree.ElementTree(root)
print(etree.tostring(rou_xml))
tree.write('/home/yining/CARLA_0.9.9/Co-Simulation/Sumo/examples/rou/Town04_flow.rou.xml', pretty_print=True, xml_declaration=True, encoding='utf-8')
| 37.571429
| 147
| 0.73891
|
4a0891d857ed155049492c65c776e38aa2619ce7
| 3,761
|
py
|
Python
|
pyexocross/hitran/hitran.py
|
ucl-exoplanets/pyexocross
|
703341cd0fddafcbb04e935c89ddc9d02dda9f59
|
[
"BSD-3-Clause"
] | null | null | null |
pyexocross/hitran/hitran.py
|
ucl-exoplanets/pyexocross
|
703341cd0fddafcbb04e935c89ddc9d02dda9f59
|
[
"BSD-3-Clause"
] | null | null | null |
pyexocross/hitran/hitran.py
|
ucl-exoplanets/pyexocross
|
703341cd0fddafcbb04e935c89ddc9d02dda9f59
|
[
"BSD-3-Clause"
] | 1
|
2021-01-15T12:54:04.000Z
|
2021-01-15T12:54:04.000Z
|
import numpy as np
from ..linelist import Linelist
from ..broadener import Broadener
import pathlib
class HITRANSelfBroadener(Broadener):
    """Self-broadening using the HITRAN `gamma_self` column.

    The temperature exponent is fixed at 1.0 here — presumably a default
    for self-broadening; confirm against the intended physics.
    """
    def __init__(self, ratio=1.0):
        super().__init__(ratio=ratio)
    @property
    def species(self):
        # Broadener registry key used by the linelist.
        return 'self'
    def calculate_gamma(self, transitions):
        """Half-widths scaled from the HITRAN reference (296 K, 1 atm)
        to the broadener's current self.T / self.P."""
        from ..util import compute_gamma
        return compute_gamma(transitions['gamma_self'].values,1.0, self.T, self.P,
                             296.0, 1.0)
class HITRANAirBroadener(Broadener):
    """Air-broadening using the HITRAN `gamma_air` and `n_air` columns."""
    def __init__(self, ratio=1.0):
        super().__init__(ratio=ratio)
    @property
    def species(self):
        # Broadener registry key used by the linelist.
        return 'air'
    def calculate_gamma(self, transitions):
        """Half-widths scaled from the HITRAN reference (296 K, 1 atm)
        using each transition's own temperature exponent n_air."""
        from ..util import compute_gamma
        return compute_gamma(transitions['gamma_air'].values,transitions['n_air'].values, self.T, self.P,
                             296.0, 1.0)
class HITRANLinelist(Linelist):
    """Linelist backed by a fixed-width HITRAN `.par` transitions file.

    The molecule id is read from the first record; isotopologue masses
    and (normalized) natural abundances are looked up in the bundled
    HAPI tables.
    """

    def __init__(self, filename, iso_abundances=None):
        # iso_abundances is accepted for interface compatibility but not
        # used; abundances come from the HAPI tables instead.
        super().__init__()
        self._filename = filename
        self.load_hitran_file(filename)
        filesize = pathlib.Path(filename).stat().st_size
        # Records are fixed-width, so the transition count is just the
        # file size divided by the record width.
        self._total_transitions = filesize // (self._total_line)

    @property
    def totalTransitions(self):
        """Number of transition records in the file."""
        return self._total_transitions

    def add_self_broadener(self, ratio=1.0):
        """Attach a self-broadener (no-op when one is already registered)."""
        if 'self' not in self._broadeners:
            self.add_broadener(HITRANSelfBroadener(ratio=ratio))

    def add_air_broadener(self, ratio=1.0):
        """Attach an air-broadener (no-op when one is already registered)."""
        if 'air' not in self._broadeners:
            self.add_broadener(HITRANAirBroadener(ratio=ratio))

    def load_hitran_file(self, filename):
        """Read the molecule id and record width from the first line."""
        with open(filename, 'r') as f:
            line = f.readline()
            # The first two columns of a HITRAN record hold the molecule id.
            self._molid = int(line[:2])
            # NOTE(review): readline() already includes the trailing
            # newline, so len(line) + 1 may overstate the record width by
            # one byte — confirm against the file format / line endings.
            self._total_line = len(line) + 1
        self.discover_iso()

    def discover_iso(self):
        """Collect isotopologue ids, masses and normalized abundances."""
        from .hapi import ISO, molecularMass, abundance
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # documented replacement.
        self._isotopalogues = np.array([k[1] for k in ISO if k[0] == self._molid], dtype=int)
        max_iso = self._isotopalogues.max()
        self._molmasses = np.empty(shape=(max_iso))
        self._abundance_vals = np.empty(shape=(max_iso))
        # NOTE(review): entries for gaps in the isotopologue numbering
        # (if any) stay uninitialized (np.empty) — confirm HAPI ids are
        # contiguous per molecule.
        for iso in self._isotopalogues:
            self._molmasses[iso - 1] = molecularMass(self._molid, iso)
            self._abundance_vals[iso - 1] = abundance(self._molid, iso)
        self._abundance_vals /= self._abundance_vals.sum()

    @property
    def molecule(self):
        """HITRAN molecule name for the detected molecule id."""
        from .hapi import moleculeName
        return moleculeName(self._molid)

    def compute_partition(self, temperature, df):
        """Per-transition partition sums.

        Values are precomputed per isotopologue in transitions();
        `temperature` is unused here but kept for the Linelist interface.
        (A stale unused `partitionSum` import was removed.)
        """
        isoid = df['IsoID'].values - 1
        return self._iso_partition[isoid]

    def compute_doppler(self, temperature, df):
        """Doppler (thermal) broadening widths for each transition."""
        from ..util import doppler_broad
        freq = df['v_if'].values
        masses = self._molmasses[df['IsoID'].values - 1]
        return doppler_broad(freq, masses, temperature)

    def get_transitions(self, min_wn, max_wn, chunksize=10000):
        """Yield transition chunks read from the file.

        NOTE(review): min_wn/max_wn are accepted but not forwarded —
        wavenumber filtering appears to happen downstream; confirm.
        """
        from .util import read_hitran_pandas
        yield from read_hitran_pandas(self._filename, chunksize=chunksize)

    def get_abundance(self, df):
        """Normalized abundance for each transition's isotopologue."""
        abundances = self._abundance_vals[df['IsoID'].values - 1]
        return abundances

    def transitions(self, wngrid, temperature, pressure, pf=None, wing_cutoff=25.0, chunksize=10000, threshold=1e-34):
        """Yield absorption contributions.

        Precomputes the per-isotopologue partition sums at `temperature`
        before delegating to the base-class implementation.
        """
        from .hapi import partitionSum
        self._iso_partition = np.array([partitionSum(self._molid, x + 1, temperature)
                                        for x in range(self._molmasses.shape[0])])
        yield from super().transitions(wngrid, temperature, pressure, pf, wing_cutoff, chunksize, threshold)
| 33.882883
| 117
| 0.643978
|
4a0891ee485d6e5640c13d841e8691ba98aef3c2
| 1,224
|
py
|
Python
|
src/mention.py
|
edvisees/EDL2015
|
e9d0764277717ccf21538ba30481ea9d1b2d914b
|
[
"Apache-2.0"
] | null | null | null |
src/mention.py
|
edvisees/EDL2015
|
e9d0764277717ccf21538ba30481ea9d1b2d914b
|
[
"Apache-2.0"
] | null | null | null |
src/mention.py
|
edvisees/EDL2015
|
e9d0764277717ccf21538ba30481ea9d1b2d914b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 5 types of entities: GPE, ORG, PER + LOC, FAC (and TTL title?)
class Mention:
    """An entity mention for TAC EDL output.

    NOTE: this module is Python 2 (printMention uses the `print`
    statement).
    """
    def __init__(self, word, begin, end, ner, name, link):
        # word: surface string; begin/end: character offsets within the
        # document `name`; ner: entity type; link: KB link id.
        self.word = word
        self.begin = begin
        self.end = end
        self.ner = ner
        self.name = name
        # All mentions produced here are named mentions.
        self.mention_type = "NAM"
        # Confidence value emitted in the output line.
        self.value = "1.0"
        self.link = link
    # WIP: in the moment of printing the Mention we should know the Mention_id
    # TODO: mention_id: should be unique not by document, but by test/report
    def printMention(self, mention_id):
        """Print this mention as one tab-separated EDL submission line."""
        self.mention_id = mention_id
        p = []
        p.append('CMU_Edvisees_1')
        p.append('QUERY' + str(self.mention_id).zfill(4)) # or 5?
        p.append(self.word)
        p.append("%s:%s-%s" % (self.name, self.begin, self.end))
        p.append(self.link)
        p.append(self.ner)
        p.append(self.mention_type)
        p.append(self.value)
        print '\t'.join(p)
    def get_kb_entity_str(self):
        """Format the linked KB entity, or "NIL" when unlinked.

        NOTE(review): self.kb_entity is never set in __init__; callers
        must assign it before calling this method — confirm.
        """
        if self.kb_entity: # TODO: it is a list, make it a class/struct?
            return '%s <name = "%s">' % (self.kb_entity[0], ' / '.join(set(self.kb_entity[1])))
        else:
            return "NIL"
| 33.081081
| 95
| 0.571895
|
4a089224207a0894fcdb533bb3ffd9b523ea3393
| 15,929
|
py
|
Python
|
src/skmultiflow/evaluation/evaluate_holdout.py
|
tlac980/scikit-multiflow
|
e05a512f3170555767cf229a4f7b5fed2423c86c
|
[
"BSD-3-Clause"
] | null | null | null |
src/skmultiflow/evaluation/evaluate_holdout.py
|
tlac980/scikit-multiflow
|
e05a512f3170555767cf229a4f7b5fed2423c86c
|
[
"BSD-3-Clause"
] | null | null | null |
src/skmultiflow/evaluation/evaluate_holdout.py
|
tlac980/scikit-multiflow
|
e05a512f3170555767cf229a4f7b5fed2423c86c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import warnings
import re
from timeit import default_timer as timer
from numpy import unique
from skmultiflow.evaluation.base_evaluator import StreamEvaluator
from skmultiflow.utils import constants, get_dimensions
class EvaluateHoldout(StreamEvaluator):
    """ The holdout evaluation method or periodic holdout evaluation method.

    Analyses each arriving sample by updating its statistics, without computing
    performance metrics, nor predicting labels or regression values.

    The performance evaluation happens at every n_wait analysed samples, at which
    moment the evaluator will test the learners performance on a test set, formed
    by yet unseen samples, which will be used to evaluate performance, but not to
    train the model.

    It's possible to use the same test set for every test made or to dynamically
    create test sets, so that they differ from each other. If dynamic test sets
    are enabled, we use the data stream to create test sets on the go. This process
    is more likely to generate test sets that follow the current concept, in
    comparison to static test sets.

    Thus, if concept drift is known to be present in the stream, using dynamic
    test sets is recommended. If no concept drift is expected, disabling this
    parameter will speed up the evaluation process.

    Parameters
    ----------
    n_wait: int (Default: 10000)
        The number of samples to process between each test. Also defines when to update the plot if
        `show_plot=True`. Note that setting `n_wait` too small can significantly slow the evaluation process.

    max_samples: int (Default: 100000)
        The maximum number of samples to process during the evaluation.

    batch_size: int (Default: 1)
        The number of samples to pass at a time to the model(s).

    max_time: float (Default: float("inf"))
        The maximum duration of the simulation (in seconds).

    metrics: list, optional (Default: ['accuracy', 'kappa'])
        | The list of metrics to track during the evaluation. Also defines the metrics that will be displayed in
          plots and/or logged into the output file. Valid options are
        | **Classification**
        | 'accuracy'
        | 'kappa'
        | 'kappa_t'
        | 'kappa_m'
        | 'true_vs_predicted'
        | 'precision'
        | 'recall'
        | 'f1'
        | 'gmean'
        | **Multi-target Classification**
        | 'hamming_score'
        | 'hamming_loss'
        | 'exact_match'
        | 'j_index'
        | **Regression**
        | 'mean_square_error'
        | 'mean_absolute_error'
        | 'true_vs_predicted'
        | **Multi-target Regression**
        | 'average_mean_squared_error'
        | 'average_mean_absolute_error'
        | 'average_root_mean_square_error'
        | **Experimental**
        | 'running_time'
        | 'model_size'

    output_file: string, optional (Default: None)
        File name to save the summary of the evaluation.

    show_plot: bool (Default: False)
        If True, a plot will show the progress of the evaluation. Warning: Plotting can slow down the evaluation
        process.

    restart_stream: bool, optional (Default=True)
        If True, the stream is restarted once the evaluation is complete.

    test_size: int (Default: 5000)
        The size of the test set.

    dynamic_test_set: bool (Default: False)
        If `True`, will continuously change the test set, otherwise will use the same test set for all tests.

    Notes
    -----
    1. This evaluator can process a single learner to track its performance; or multiple learners at a time, to
       compare different models on the same stream.

    Examples
    --------
    >>> # The first example demonstrates how to evaluate one model
    >>> from skmultiflow.data import SEAGenerator
    >>> from skmultiflow.trees import HoeffdingTree
    >>> from skmultiflow.evaluation import EvaluateHoldout
    >>>
    >>> # Set the stream
    >>> stream = SEAGenerator(random_state=1)
    >>> stream.prepare_for_use()
    >>>
    >>> # Set the model
    >>> ht = HoeffdingTree()
    >>>
    >>> # Set the evaluator
    >>> evaluator = EvaluateHoldout(max_samples=100000,
    >>>                             max_time=1000,
    >>>                             show_plot=True,
    >>>                             metrics=['accuracy', 'kappa'],
    >>>                             dynamic_test_set=True)
    >>>
    >>> # Run evaluation
    >>> evaluator.evaluate(stream=stream, model=ht, model_names=['HT'])

    >>> # The second example demonstrates how to compare two models
    >>> from skmultiflow.data import SEAGenerator
    >>> from skmultiflow.trees import HoeffdingTree
    >>> from skmultiflow.bayes import NaiveBayes
    >>> from skmultiflow.evaluation import EvaluateHoldout
    >>>
    >>> # Set the stream
    >>> stream = SEAGenerator(random_state=1)
    >>> stream.prepare_for_use()
    >>>
    >>> # Set the model
    >>> ht = HoeffdingTree()
    >>> nb = NaiveBayes()
    >>>
    >>> # Set the evaluator
    >>> evaluator = EvaluateHoldout(max_samples=100000,
    >>>                             max_time=1000,
    >>>                             show_plot=True,
    >>>                             metrics=['accuracy', 'kappa'],
    >>>                             dynamic_test_set=True)
    >>>
    >>> # Run evaluation
    >>> evaluator.evaluate(stream=stream, model=[ht, nb], model_names=['HT', 'NB'])
    """

    def __init__(self,
                 n_wait=10000,
                 max_samples=100000,
                 batch_size=1,
                 max_time=float("inf"),
                 metrics=None,
                 output_file=None,
                 show_plot=False,
                 restart_stream=True,
                 test_size=5000,
                 dynamic_test_set=False):
        super().__init__()
        self._method = 'holdout'
        self.n_wait = n_wait
        self.max_samples = max_samples
        self.batch_size = batch_size
        self.max_time = max_time
        self.output_file = output_file
        self.show_plot = show_plot
        if metrics is None:
            self.metrics = [constants.ACCURACY, constants.KAPPA]
        else:
            self.metrics = metrics
        self.restart_stream = restart_stream
        # Holdout parameters
        self.dynamic_test_set = dynamic_test_set
        # Fix: the original checked `test_size < 0`, allowing a zero-size
        # test set despite the error message's stated contract.
        if test_size <= 0:
            raise ValueError('test_size has to be greater than 0.')
        else:
            self.test_size = test_size
            self.n_sliding = test_size

        warnings.filterwarnings("ignore", ".*invalid value encountered in true_divide.*")
        warnings.filterwarnings("ignore", ".*Passing 1d.*")

    def evaluate(self, stream, model, model_names=None):
        """ Evaluates a learner or set of learners on samples from a stream.

        Parameters
        ----------
        stream: Stream
            The stream from which to draw the samples.

        model: StreamModel or list
            The learner or list of learners to evaluate.

        model_names: list, optional (Default=None)
            A list with the names of the learners.

        Returns
        -------
        StreamModel or list
            The trained learner(s).

        """
        # First off we need to verify if this is a simple evaluation task or a comparison between learners task.
        self._init_evaluation(model=model, stream=stream, model_names=model_names)

        if self._check_configuration():
            self._reset_globals()
            # Initialize metrics and outputs (plots, log files, ...)
            self._init_metrics()
            self._init_plot()
            self._init_file()

            self.model = self._periodic_holdout()

            if self.show_plot:
                self.visualizer.hold()

        return self.model

    def _periodic_holdout(self):
        """ Method to control the holdout evaluation.
        """
        self._start_time = timer()
        self._end_time = timer()
        print('Holdout Evaluation')
        print('Evaluating {} target(s).'.format(self.stream.n_targets))

        actual_max_samples = self.stream.n_remaining_samples()
        if actual_max_samples == -1 or actual_max_samples > self.max_samples:
            actual_max_samples = self.max_samples

        first_run = True
        if not self.dynamic_test_set:
            # Static holdout: carve the test set out of the stream once.
            print('Separating {} holdout samples.'.format(self.test_size))
            self.X_test, self.y_test = self.stream.next_sample(self.test_size)
            self.global_sample_count += self.test_size

        performance_sampling_cnt = 0
        print('Evaluating...')
        while ((self.global_sample_count < self.max_samples) & (self._end_time - self._start_time < self.max_time)
               & (self.stream.has_more_samples())):
            try:
                X, y = self.stream.next_sample(self.batch_size)
                if X is not None and y is not None:
                    self.global_sample_count += self.batch_size

                    # Train
                    if first_run:
                        for i in range(self.n_models):
                            if self._task_type == constants.CLASSIFICATION:
                                self.running_time_measurements[i].compute_training_time_begin()
                                self.model[i].partial_fit(X=X, y=y, classes=self.stream.target_values)
                                self.running_time_measurements[i].compute_training_time_end()
                            elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
                                self.running_time_measurements[i].compute_training_time_begin()
                                self.model[i].partial_fit(X=X, y=y, classes=unique(self.stream.target_values))
                                self.running_time_measurements[i].compute_training_time_end()
                            else:
                                self.running_time_measurements[i].compute_training_time_begin()
                                self.model[i].partial_fit(X=X, y=y)
                                self.running_time_measurements[i].compute_training_time_end()
                            self.running_time_measurements[i].update_time_measurements(self.batch_size)
                        first_run = False
                    else:
                        for i in range(self.n_models):
                            # Compute running time
                            self.running_time_measurements[i].compute_training_time_begin()
                            self.model[i].partial_fit(X, y)
                            self.running_time_measurements[i].compute_training_time_end()
                            self.running_time_measurements[i].update_time_measurements(self.batch_size)

                    self._check_progress(actual_max_samples)   # TODO Confirm place

                    # Test on holdout set
                    if self.dynamic_test_set:
                        perform_test = self.global_sample_count >= (self.n_wait * (performance_sampling_cnt + 1)
                                                                    + (self.test_size * performance_sampling_cnt))
                    else:
                        perform_test = (self.global_sample_count - self.test_size) % self.n_wait == 0

                    if perform_test | (self.global_sample_count >= self.max_samples):

                        if self.dynamic_test_set:
                            # Draw a fresh test set that follows the current concept.
                            print('Separating {} holdout samples.'.format(self.test_size))
                            self.X_test, self.y_test = self.stream.next_sample(self.test_size)
                            self.global_sample_count += get_dimensions(self.X_test)[0]

                        # Test
                        if (self.X_test is not None) and (self.y_test is not None):
                            prediction = [[] for _ in range(self.n_models)]
                            for i in range(self.n_models):
                                try:
                                    self.running_time_measurements[i].compute_testing_time_begin()
                                    prediction[i].extend(self.model[i].predict(self.X_test))
                                    self.running_time_measurements[i].compute_testing_time_end()
                                    self.running_time_measurements[i].update_time_measurements(self.test_size)
                                except TypeError:
                                    raise TypeError("Unexpected prediction value from {}"
                                                    .format(type(self.model[i]).__name__))

                            if prediction is not None:
                                for j in range(self.n_models):
                                    for i in range(len(prediction[0])):
                                        self.mean_eval_measurements[j].add_result(self.y_test[i],
                                                                                  prediction[j][i])
                                        self.current_eval_measurements[j].add_result(self.y_test[i],
                                                                                     prediction[j][i])
                                self._update_metrics()
                            performance_sampling_cnt += 1

                self._end_time = timer()
            except BaseException as exc:
                print(exc)
                # Fix: the original compared the exception *instance* to the
                # KeyboardInterrupt *class* with `is`, which is always False,
                # so metrics were never flushed on Ctrl-C.
                if isinstance(exc, KeyboardInterrupt):
                    self._update_metrics()
                break

        # Flush file buffer, in case it contains data
        self._flush_file_buffer()

        self.evaluation_summary()

        if self.restart_stream:
            self.stream.restart()

        return self.model

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """ Partially fit all the learners on the given data.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            The data upon which the algorithm will create its model.

        y: Array-like
            An array-like containing the classification labels / target values for all samples in X.

        classes: list
            Stores all the classes that may be encountered during the classification task. Not used for regressors.

        sample_weight: Array-like
            Samples weight. If not provided, uniform weights are assumed.

        Returns
        -------
        EvaluateHoldout
            self

        """
        if self.model is not None:
            for i in range(self.n_models):
                if self._task_type == constants.CLASSIFICATION or \
                        self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
                    self.model[i].partial_fit(X=X, y=y, classes=classes, sample_weight=sample_weight)
                else:
                    self.model[i].partial_fit(X=X, y=y, sample_weight=sample_weight)
            return self
        else:
            return self

    def predict(self, X):
        """ Predicts with the estimator(s) being evaluated.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            All the samples we want to predict the label for.

        Returns
        -------
        list of numpy.ndarray
            Model(s) predictions

        """
        predictions = None
        if self.model is not None:
            predictions = []
            for i in range(self.n_models):
                predictions.append(self.model[i].predict(X))

        return predictions

    def get_info(self):
        """Return the evaluator's repr, with the output file path reduced
        to its base name for readability."""
        info = self.__repr__()
        if self.output_file is not None:
            _, filename = os.path.split(self.output_file)
            info = re.sub(r"output_file=(.\S+),", "output_file='{}',".format(filename), info)

        return info
| 40.326582
| 118
| 0.569025
|
4a0892cf3f7baac92366630b645f4dc8c61d8408
| 2,120
|
py
|
Python
|
hakoblog/web.py
|
hakobe/hakoblog-python
|
61823d5cb538e40917423740d5fe06195ed5c858
|
[
"MIT"
] | 13
|
2017-01-18T12:40:07.000Z
|
2019-08-24T05:34:06.000Z
|
hakoblog/web.py
|
hakobe/hakoblog-python
|
61823d5cb538e40917423740d5fe06195ed5c858
|
[
"MIT"
] | 1
|
2017-01-20T10:10:37.000Z
|
2017-01-20T10:10:37.000Z
|
hakoblog/web.py
|
hakobe/hakoblog-python
|
61823d5cb538e40917423740d5fe06195ed5c858
|
[
"MIT"
] | null | null | null |
from flask import (
Flask,
g as flask_g,
render_template,
request,
abort,
url_for,
redirect,
)
from hakoblog.db import DB
from hakoblog.config import CONFIG
from hakoblog.action.blog import BlogAction
from hakoblog.loader.entry import EntryLoader
from hakoblog.action.entry import EntryAction
# Application object; settings are loaded from the CONFIG object
# (see hakoblog.config).
web = Flask(__name__)
web.config.from_object(CONFIG)
def get_db():
    """Return the per-app-context DB connection, creating it lazily and
    caching it on flask.g."""
    db = getattr(flask_g, "_database", None)
    if db is None:
        db = flask_g._database = DB()
    return db
@web.teardown_appcontext
def close_connection(exception):
    """Close the cached DB connection when the app context is torn down."""
    db = getattr(flask_g, "_database", None)
    if db is not None:
        db.close()
@web.after_request
def add_secure_headers(response):
    """Attach standard security headers to every outgoing response.

    Sets clickjacking (X-Frame-Options), MIME-sniffing
    (X-Content-Type-Options), legacy XSS-filter and a same-origin
    Content-Security-Policy header.

    (A leftover debug `print(response)` that logged every response
    object to stdout was removed.)
    """
    response.headers.add("X-Frame-Options", "DENY")
    response.headers.add("X-Content-Type-Options", "nosniff")
    response.headers.add("X-XSS-Protection", "1;mode=block")
    response.headers.add("Content-Security-Policy", "default-src 'self'")
    return response
@web.route("/")
def index():
blog = BlogAction.ensure_global_blog_created(get_db())
entries = EntryLoader.find_entries(get_db(), blog.id, limit=5)
return render_template("index.html", blog=blog, entries=entries)
@web.route("/entry/<int:entry_id>")
def entry(entry_id):
blog = BlogAction.ensure_global_blog_created(get_db())
entry = EntryLoader.find_by_id(get_db(), entry_id)
if entry is None:
abort(404)
if entry.blog_id != blog.id:
abort(403)
return render_template("entry.html", blog=blog, entry=entry)
@web.route("/-/post", methods=["GET"])
def post_get():
blog = BlogAction.ensure_global_blog_created(get_db())
return render_template("post.html", blog=blog)
@web.route("/-/post", methods=["POST"])
def post_post():
blog = BlogAction.ensure_global_blog_created(get_db())
title = request.form["title"]
body = request.form["body"]
blog_id = int(request.form["blog_id"])
if int(blog_id) != blog.id:
abort(400)
EntryAction.post(get_db(), blog_id=blog.id, title=title, body=body)
return redirect(url_for("index"))
| 24.090909
| 73
| 0.688208
|
4a089503f783993979e3f4dd54a72ae297f00d2a
| 1,022
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/express_route_circuit_peering_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/mgmt/network/v2017_09_01/models/express_route_circuit_peering_paged.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure/mgmt/network/v2017_09_01/models/express_route_circuit_peering_paged.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ExpressRouteCircuitPeeringPaged(Paged):
    """
    A paging container for iterating over a list of :class:`ExpressRouteCircuitPeering <azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering>` object
    """
    # msrest deserialization map: 'nextLink' is the continuation URL,
    # 'value' holds the items of the current page.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitPeering]'}
    }
    def __init__(self, *args, **kwargs):
        super(ExpressRouteCircuitPeeringPaged, self).__init__(*args, **kwargs)
| 36.5
| 161
| 0.609589
|
4a0895349baa18b3b704b859e66c9758cdd55b1b
| 13,066
|
py
|
Python
|
codeforces/556D_fug_fastlist2.py
|
snsokolov/contests
|
ae02ea872ca91ef98630cc172a844b82cc56f621
|
[
"Unlicense"
] | 1
|
2015-08-31T05:09:02.000Z
|
2015-08-31T05:09:02.000Z
|
codeforces/556D_fug_fastlist2.py
|
snsokolov/contests
|
ae02ea872ca91ef98630cc172a844b82cc56f621
|
[
"Unlicense"
] | null | null | null |
codeforces/556D_fug_fastlist2.py
|
snsokolov/contests
|
ae02ea872ca91ef98630cc172a844b82cc56f621
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# 556D_fug.py - Codeforces.com/problemset/problem/556/D Fug quiz by Sergey 2015
# Standard modules
import unittest
import sys
import re
# Additional modules
import bisect
###############################################################################
# Fastlist Class
###############################################################################
class Fastlist(object):
""" Fastlist representation """
def __init__(self, l=None, load=5000, sorted=0):
    """Initialize the chunked-list container.

    l: optional iterable of initial items. The original used a mutable
       `[]` default — the classic shared-mutable-default pitfall — so a
       None sentinel is used instead (backward compatible for callers).
    load: target chunk size for the underlying sublists.
    sorted: when truthy, sorted order is maintained and positional
       access is restricted. (The name shadows the builtin `sorted`,
       but it is part of the public keyword signature, so it is kept.)
    """
    self._load = load
    self._sorted = sorted
    self._lists = []
    self._starts = []
    self._mins = []
    self._insert_list()
    # Cached iteration state used elsewhere in the class.
    self._irev = 0
    self._ii = 0
    self._il = 0
    self.extend([] if l is None else l)
def _index_location(self, index):
if len(self._lists[0]) == 0:
raise IndexError("List index out of range")
if index == 0:
return (0, 0)
if index == -1:
return (len(self._lists) - 1, len(self._lists[-1]) - 1)
if self._sorted:
raise RuntimeError("No index access to the sorted list, exc 0, -1")
length = len(self)
if index < 0:
index = length + index
if index >= length:
raise IndexError("List index out of range")
il = bisect.bisect_right(self._starts, index) - 1
return (il, index - self._starts[il])
def _insert_list(self, il=None):
if il is None:
il = len(self._lists)
self._lists.insert(il, [])
if self._sorted:
if il == 0:
self._mins.insert(il, None)
else:
self._mins.insert(il, self._lists[il-1][-1])
else:
if il == 0:
self._starts.insert(il, 0)
else:
start = self._starts[il-1] + len(self._lists[il-1])
self._starts.insert(il, start)
def _del_list(self, il):
del self._lists[il]
if self._sorted:
del self._mins[il]
else:
del self._starts[il]
def _rebalance(self, il):
illen = len(self._lists[il])
if illen >= self._load * 2:
self._insert_list(il)
self._even_lists(il)
if illen <= self._load * 0.2:
if il != 0:
self._even_lists(il-1)
elif len(self._lists) > 1:
self._even_lists(il)
def _even_lists(self, il):
tot = len(self._lists[il]) + len(self._lists[il+1])
if tot < self._load * 1:
self._lists[il] += self._lists[il+1]
self._del_list(il+1)
if self._sorted:
self._mins[il] = self._lists[il][0]
else:
half = tot//2
ltot = self._lists[il] + self._lists[il+1]
self._lists[il] = ltot[:half]
self._lists[il+1] = ltot[half:]
if self._sorted:
self._mins[il] = self._lists[il][0]
self._mins[il+1] = self._lists[il+1][0]
else:
self._starts[il+1] = self._starts[il] + len(self._lists[il])
def _obj_location(self, obj, l=0):
if not self._sorted:
raise RuntimeError("No by-value access to an unserted list")
il = 0
if len(self._mins) > 1 and obj > self._mins[0]:
if l:
il = bisect.bisect_left(self._mins, obj) - 1
else:
il = bisect.bisect_right(self._mins, obj) - 1
if l:
ii = bisect.bisect_left(self._lists[il], obj)
else:
ii = bisect.bisect_right(self._lists[il], obj)
return (il, ii)
def insert(self, index, obj):
(il, ii) = self._index_location(index)
self._lists[il].insert(ii, obj)
for j in range(il + 1, len(self._starts)):
self._starts[j] += 1
self._rebalance(il)
def append(self, obj):
if len(self._lists[-1]) >= self._load:
self._insert_list()
self._lists[-1].append(obj)
if self._sorted and self._mins[0] is None:
self._mins[0] = self._lists[0][0]
def extend(self, iter):
for n in iter:
self.append(n)
def pop(self, index=None):
if index is None:
index = -1
(il, ii) = self._index_location(index)
item = self._lists[il].pop(ii)
if self._sorted:
if ii == 0 and len(self._lists[il]) > 0:
self._mins[il] = self._lists[il][0]
else:
for j in range(il + 1, len(self._starts)):
self._starts[j] -= 1
self._rebalance(il)
return item
def clear(self):
self._lists.clear()
self._starts.clear()
self._mins.clear()
self._insert_list()
def as_list(self):
return sum(self._lists, [])
def insort(self, obj, l=0):
(il, ii) = self._obj_location(obj, l)
self._lists[il].insert(ii, obj)
if ii == 0:
self._mins[il] = obj
self._rebalance(il)
def insort_left(self, obj):
self.insort(obj, l=1)
def add(self, obj):
if self._sorted:
self.insort(obj)
else:
self.append(obj)
def __str__(self):
return str(self.as_list())
def __setitem__(self, index, obj):
if isinstance(index, int):
(il, ii) = self._index_location(index)
self._lists[il][ii] = obj
elif isinstance(index, slice):
raise RuntimeError("Slice assignment is not supported")
def __getitem__(self, index):
if isinstance(index, int):
(il, ii) = self._index_location(index)
return self._lists[il][ii]
elif isinstance(index, slice):
rg = index.indices(len(self))
if rg[0] == 0 and rg[1] == len(self) and rg[2] == 1:
return self.as_list()
return [self.__getitem__(index) for index in range(*rg)]
def __iadd__(self, obj):
if self._sorted:
[self.insort(n) for n in obj]
else:
[self.append(n) for n in obj]
return self
def __delitem__(self, index):
if isinstance(index, int):
self.pop(index)
elif isinstance(index, slice):
rg = index.indices(len(self))
[self.__delitem__(rg[0]) for i in range(*rg)]
def __len__(self):
if self._sorted:
return sum([len(l) for l in self._lists])
return self._starts[-1] + len(self._lists[-1])
def __contains__(self, obj):
if self._sorted:
it = self.lower_bound(obj)
return not it.iter_end() and obj == it.iter_getitem()
else:
for n in self:
if obj == n:
return True
return False
def __bool__(self):
return len(self._lists[0]) != 0
def __iter__(self):
if not self._irev:
self._il = self._ii = 0
else:
self._il = len(self._lists) - 1
self._ii = len(self._lists[self._il]) - 1
return self
def __reversed__(self):
self._irev = 1
self.__iter__()
return self
def _iter_fix(self):
if not self._irev:
if (self._il != len(self._lists) - 1 and
self._ii == len(self._lists[self._il])):
self._il += 1
self._ii = 0
else:
if self._il != 0 and self._ii == -1:
self._il -= 1
self._ii = len(self._lists[self._il]) - 1
def __next__(self):
item = self.iter_getitem()
if not self._irev:
self._ii += 1
else:
self._ii -= 1
return item
def iter_end(self):
if not self._irev:
return (self._il == len(self._lists) - 1 and
self._ii == len(self._lists[self._il]))
else:
return (self._il == 0 and self._ii == -1)
def iter_getitem(self):
if self.iter_end() or len(self._lists[0]) == 0:
raise StopIteration("Iteration stopped")
self._iter_fix()
return self._lists[self._il][self._ii]
def iter_del(self):
item = self._lists[self._il].pop(self._ii)
if self._sorted:
if self._ii == 0 and len(self._lists[self._il]) > 0:
self._mins[self._il] = self._lists[self._il][0]
else:
for j in range(self._il + 1, len(self._starts)):
self._starts[j] -= 1
self._rebalance(self._il)
return item
def lower_bound(self, obj):
(self._il, self._ii) = self._obj_location(obj, l=1)
return self
def upper_bound(self, obj):
(self._il, self._ii) = self._obj_location(obj)
return self
###############################################################################
# Fug Class
###############################################################################
class Fug:
    """Greedy bridge-to-gap matcher for Codeforces 556D.

    Expects `args = [sorted_gaps, sorted_bridges, gap_count]` where each gap
    is `(max_len, min_len, original_index)` and each bridge `(length,
    original_index)`, both pre-sorted ascending.
    """

    def __init__(self, args):
        """Store the inputs and load the bridges into a sorted Fastlist."""
        self.gsrt = args[0]
        self.asrt = args[1]
        self.gn = args[2]
        self.result = [0] * self.gn
        self.a = Fastlist(self.asrt, load=500, sorted=1)

    def calculate(self):
        """Assign the shortest feasible bridge to each gap; "No" if stuck."""
        for gmax, gmin, gidx in self.gsrt:
            # Smallest bridge no shorter than the gap's minimum span.
            cursor = self.a.lower_bound((gmin, 0))
            if cursor.iter_end():
                return "No"
            blen, bidx = cursor.iter_getitem()
            # Even the tightest candidate is too long for this gap.
            if blen > gmax:
                return "No"
            self.result[gidx] = bidx + 1
            cursor.iter_del()
        return "Yes\n" + " ".join(map(str, self.result))
###############################################################################
# Executable code
###############################################################################
def get_inputs(test_inputs=None):
    """Read the problem input from stdin, or from `test_inputs` in tests.

    Returns `[sorted_gaps, sorted_bridges, gap_count]` where each gap is
    `(max_len, min_len, index)` and each bridge `(length, index)`.
    """
    feed = iter(test_inputs.split("\n")) if test_inputs else None

    def uinput():
        """ Unit-testable input function wrapper """
        return next(feed) if feed else sys.stdin.readline()

    header = list(map(int, uinput().split()))
    n_islands = header[0]
    islands = [list(map(int, uinput().split())) for _ in range(n_islands)]

    # Gap i sits between islands i and i+1; a bridge fits iff its length is
    # within [lo, hi] computed from the island coordinate ranges.
    gaps = []
    for i in range(n_islands - 1):
        lo = islands[i + 1][0] - islands[i][1]
        hi = islands[i + 1][1] - islands[i][0]
        gaps.append((hi, lo, i))

    bridges = list(map(int, uinput().split()))
    indexed_bridges = [(length, idx) for idx, length in enumerate(bridges)]

    return [sorted(gaps), sorted(indexed_bridges), n_islands - 1]
def calculate(test_inputs=None):
    """Parse the input and run the solver; return the answer string."""
    solver = Fug(get_inputs(test_inputs))
    return solver.calculate()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
    """Embedded self-tests; run via `python 556D_fug_fastlist2.py -ut`."""

    def test_sample_tests(self):
        """ Quiz sample tests. Add \n to separate lines """
        # Sample test 1
        test = "4 4\n1 4\n7 8\n9 10\n12 14\n4 5 3 8"
        self.assertEqual(calculate(test), "Yes\n2 3 1")
        # get_inputs must yield [sorted gaps, sorted bridges, gap count]
        self.assertEqual(
            get_inputs(test),
            [[(3, 1, 1), (5, 2, 2), (7, 3, 0)],
             [(3, 2), (4, 0), (5, 1), (8, 3)], 3])
        # My tests
        test = "5 5\n1 1\n2 7\n8 8\n10 10\n16 16\n1 1 5 6 2"
        self.assertEqual(calculate(test), "Yes\n1 2 5 4")
        # Other tests
        test = "2 2\n11 14\n17 18\n2 9"
        self.assertEqual(calculate(test), "No")
        # Other tests
        # Coordinates near 1e18 exercise big-integer arithmetic
        test = (
            "2 1\n1 1\n1000000000000000000 1000000000000000000" +
            "\n999999999999999999")
        self.assertEqual(calculate(test), "Yes\n1")
        test = ("5 9\n1 2\n3 3\n5 7\n11 13\n14 20\n2 3 4 10 6 2 6 9 5")
        self.assertEqual(calculate(test), "Yes\n1 6 3 2")
        # Stress test: `size` islands spaced so that every bridge fits
        size = 2000
        test = str(size) + " " + str(size) + "\n"
        x = size*1000
        for i in range(size):
            test += str(x) + " " + str(x + i + 1) + "\n"
            x += 2 * (i + 1)
        for i in reversed(range(size)):
            test += str(i) + " "
        # Only check feasibility ("Y"), not the exact assignment
        self.assertEqual(calculate(test)[0], "Y")

    def test_Fug_class__basic_functions(self):
        """ Fug class basic functions testing """
        # Constructor test
        d = Fug([[(1, 3, 1), (2, 5, 2), (3, 7, 0)],
                 [(3, 2), (4, 0), (5, 1), (8, 3)], 3])
        # Sort bridges
        self.assertEqual(d.asrt[0], (3, 2))
        # Sort Gaps
        self.assertEqual(d.gsrt[0], (1, 3, 1))
if __name__ == "__main__":
    # Avoiding recursion limitations on large inputs
    sys.setrecursionlimit(100000)
    if sys.argv[-1] == "-ut":
        # `-ut` flag: run the embedded unit tests instead of solving stdin
        unittest.main(argv=[" "])
    # Print the result string
    sys.stdout.write(calculate())
| 30.036782
| 79
| 0.490663
|
4a08955b06b7f355347de59ad5d80c800f9df99e
| 42,449
|
py
|
Python
|
oneflow/python/nn/modules/loss.py
|
qqsun8819/oneflow
|
b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/nn/modules/loss.py
|
qqsun8819/oneflow
|
b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/nn/modules/loss.py
|
qqsun8819/oneflow
|
b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow as flow
from oneflow.python.framework.tensor import Tensor
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.module import Module
from oneflow.python.nn.modules.constant import _ConstantBase
@oneflow_export("nn.L1Loss")
@experimental_api
class L1Loss(Module):
    r"""This operator computes the L1 Loss between each element in `input` and `target`.

    The equation is:

    if reduction = "none":

    .. math::

        output = |Target - Input|

    if reduction = "mean":

    .. math::

        output = \frac{1}{n}\sum_{i=1}^n|Target_i - Input_i|

    if reduction = "sum":

    .. math::

        output = \sum_{i=1}^n|Target_i - Input_i|

    Args:
        input (oneflow.experimental.Tensor): The input Tensor.
        target (oneflow.experimental.Tensor): The target Tensor.
        reduction (str): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".

    Returns:
        oneflow.experimental.Tensor: The result Tensor.

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor([[1, 1, 1], [2, 2, 2], [7, 7, 7]], dtype = flow.float32)
        >>> target = flow.Tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype = flow.float32)
        >>> m = flow.nn.L1Loss(reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[3., 3., 3.],
                [2., 2., 2.],
                [3., 3., 3.]], dtype=oneflow.float32)
        >>> m_mean = flow.nn.L1Loss(reduction="mean")
        >>> out = m_mean(input, target)
        >>> out
        tensor([2.6667], dtype=oneflow.float32)
        >>> m_mean = flow.nn.L1Loss(reduction="sum")
        >>> out = m_mean(input, target)
        >>> out
        tensor([24.], dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean", reduce=True) -> None:
        super().__init__()
        # Legacy `reduce` flag: only the default (truthy / None) is accepted.
        if reduce is not None and not reduce:
            raise ValueError("Argument reduce is not supported yet")
        # `None` is tolerated and behaves like "none".
        assert reduction in [
            "none",
            "mean",
            "sum",
            None,
        ], "only 'sum', 'mean' and 'none' supported by now"
        self.reduction = reduction

    def forward(self, input, target):
        assert (
            input.shape == target.shape
        ), "The Input shape must be the same as Target shape"
        # Elementwise |target - input|, then apply the configured reduction.
        abs_diff = flow.experimental.abs(flow.experimental.sub(target, input))
        if self.reduction == "mean":
            return flow.experimental.mean(abs_diff)
        if self.reduction == "sum":
            return flow.experimental.sum(abs_diff)
        return abs_diff
@oneflow_export("nn.CrossEntropyLoss")
@experimental_api
class CrossEntropyLoss(Module):
    r"""This criterion combines :class:`~flow.nn.LogSoftmax` and :class:`~flow.nn.NLLLoss` in one single class.

    It is useful when training a classification problem with `C` classes.

    The `input` is expected to contain raw, unnormalized scores for each class.

    `input` has to be a Tensor of size either :math:`(minibatch, C)` or
    :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \geq 1` for the `K`-dimensional case (described later).

    This criterion expects a class index in the range :math:`[0, C-1]` as the
    `target` for each value of a 1D tensor of size `minibatch`;

    The loss can be described as:

    .. math::

        \text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right)
                       = -x[class] + \log\left(\sum_j \exp(x[j])\right)

    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below).

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor(
        ...    [[-0.1664078, -1.7256707, -0.14690138],
        ...     [-0.21474946, 0.53737473, 0.99684894],
        ...     [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        >>> target = flow.Tensor(np.array([0, 1, 2]), dtype=flow.int32)
        >>> out = flow.nn.CrossEntropyLoss(reduction="none")(input, target)
        >>> print(out.numpy())
        [0.80199665 1.1166505  0.35826024]
        >>> out_sum = flow.nn.CrossEntropyLoss(reduction="sum")(input, target)
        >>> print(out_sum.numpy())
        [2.2769072]
        >>> out_mean = flow.nn.CrossEntropyLoss(reduction="mean")(input, target)
        >>> print(out_mean.numpy())
        [0.75896907]
    """

    def __init__(
        self,
        weight=None,
        ignore_index: Optional[int] = None,
        reduction: Optional[str] = "mean",
    ) -> None:
        """Raises ValueError if `weight` is given (not supported yet)."""
        super().__init__()
        if weight is not None:
            raise ValueError("Argument weight is not supported yet")
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "only 'sum', 'mean' and None supported by now"
        self.ignore_index = ignore_index
        self.reduction = reduction
        # Fused log-softmax + negative-log-likelihood kernel: `prob` is the
        # softmax output, `out` the per-sample loss.
        self._op = (
            flow.builtin_op("sparse_softmax_cross_entropy")
            .Input("prediction")
            .Input("label")
            .Output("prob")
            .Output("out")
            .Build()
        )

    def forward(self, input, target):
        # Supported layouts: (N, C), (N, C, H), (N, C, H, W).
        assert len(input.shape) <= 4
        assert len(target.shape) == len(input.shape) - 1
        input_shape_len = len(input.shape)
        if input_shape_len == 3:
            # (N, C, H) -> (N*H, C): move the class axis last, then flatten.
            b, c, h = input.shape[0], input.shape[1], input.shape[2]
            input = flow.F.transpose(input, perm=(0, 2, 1))
            input = input.reshape(shape=[-1, input.shape[2]])
            target = target.flatten()
        elif input_shape_len == 4:
            # (N, C, H, W) -> (N*H*W, C)
            b, c, h, w = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
            input = flow.F.transpose(input, perm=(0, 2, 3, 1))
            input = input.reshape(shape=[-1, input.shape[3]])
            target = target.flatten()
        elif input_shape_len >= 5:
            # BUG FIX: the original did `raise NotImplemented`, which raises
            # a TypeError (NotImplemented is a constant, not an exception).
            raise NotImplementedError
        prob, out = self._op(input, target, depth=input.shape[len(input.shape) - 1])
        if self.ignore_index is not None:
            # Zero out the loss at positions whose target equals ignore_index;
            # `condition` ends up 1 for kept entries, 0 for ignored ones.
            zeros = flow.experimental.zeros(
                size=out.shape, dtype=out.dtype, device=out.device
            )
            condition = flow.experimental.eq(target, self.ignore_index)
            ones = flow.experimental.ones(
                size=condition.shape, dtype=condition.dtype, device=condition.device
            )
            condition = ones.sub(condition).reshape(tuple(out.shape))
            out = flow.experimental.where(condition, out, zeros)
            if self.reduction == "mean":
                # Average over the kept (non-ignored) entries only.
                reduce_sum = out.sum()
                reduce_count = condition.argwhere().shape[0]
                out = flow.experimental.mul(reduce_sum, 1.0 / reduce_count)
        if self.reduction == "mean":
            return out.mean()
        elif self.reduction == "sum":
            return out.sum()
        else:
            # CONSISTENCY FIX: restore the spatial shape for the 3-D case as
            # well; the original only reshaped the 4-D output, while NLLLoss
            # in this file reshapes both.
            if input_shape_len == 3:
                out = out.reshape((b, h))
            elif input_shape_len == 4:
                out = out.reshape((b, h, w))
            return out
@oneflow_export("nn.BCELoss")
@experimental_api
class BCELoss(Module):
    r"""This operator computes the binary cross entropy loss.

    The equation is:

    if reduction = "none":

    .. math::

        out = -(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))

    if reduction = "mean":

    .. math::

        out = -\frac{1}{n}\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))

    if reduction = "sum":

    .. math::

        out = -\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))

    Args:
        weight (oneflow.experimental.Tensor, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".

    Attention:
        The input value must be in the range of (0, 1). Or the loss function may return `nan` value.

    Returns:
        oneflow.experimental.Tensor: The result Tensor.

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor(np.array([[1.2, 0.2, -0.3], [0.7, 0.6, -2]]).astype(np.float32))
        >>> target = flow.Tensor(np.array([[0, 1, 0], [1, 0, 1]]).astype(np.float32))
        >>> weight = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2]]).astype(np.float32))
        >>> activation = flow.nn.Sigmoid()
        >>> sigmoid_input = activation(input)
        >>> m = flow.nn.BCELoss(weight, reduction="none")
        >>> out = m(sigmoid_input, target)
        >>> out
        tensor([[2.9266, 1.1963, 1.1087],
                [0.8064, 2.075 , 4.2539]], dtype=oneflow.float32)
        >>> m_sum = flow.nn.BCELoss(weight, reduction="sum")
        >>> out = m_sum(sigmoid_input, target)
        >>> out
        tensor([12.3668], dtype=oneflow.float32)
        >>> m_mean = flow.nn.BCELoss(weight, reduction="mean")
        >>> out = m_mean(sigmoid_input, target)
        >>> out
        tensor([2.0611], dtype=oneflow.float32)
    """

    # BUG FIX: `weight` was a required positional parameter although the
    # docstring documents it as optional with default None; give it the
    # documented default (backward compatible for all existing callers).
    def __init__(self, weight=None, reduction: str = None) -> None:
        super().__init__()
        # NOTE(review): the docstring says `reduction` defaults to "mean",
        # but the implemented default is None, which behaves like "none".
        # Kept as-is to avoid silently changing existing callers — confirm
        # the intended default before aligning doc and code.
        assert reduction in [
            "none",
            "sum",
            "mean",
            None,
        ], "only 'sum', 'mean' and 'none' supported by now"
        self.weight = weight
        self.reduction = reduction

    def forward(self, input, target):
        assert (
            input.shape == target.shape
        ), "The Input shape must be the same as Target shape"
        # -(t*log(x) + (1-t)*log(1-x)), elementwise.
        _cross_entropy_loss = flow.experimental.negative(
            target * flow.experimental.log(input)
            + (1 - target) * flow.experimental.log(1 - input)
        )
        if self.weight is not None:
            assert (
                self.weight.shape == input.shape
            ), "The weight shape must be the same as Input shape"
            _weighted_loss = self.weight * _cross_entropy_loss
        else:
            _weighted_loss = _cross_entropy_loss
        if self.reduction == "mean":
            return flow.experimental.mean(_weighted_loss)
        elif self.reduction == "sum":
            return flow.experimental.sum(_weighted_loss)
        else:
            return _weighted_loss
@oneflow_export("nn.NLLLoss")
@experimental_api
class NLLLoss(Module):
    r""" The negative log likelihood loss. It is useful to train a classification
    problem with `C` classes.

    The `input` given through a forward call is expected to contain
    log-probabilities of each class. `input` has to be a Tensor of size either
    :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \geq 1` for the `K`-dimensional case (described later).

    Obtaining log-probabilities in a neural network is easily achieved by
    adding a `LogSoftmax` layer in the last layer of your network.
    You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
    layer.

    The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
    where `C = number of classes`;

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::

        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_{y_n} x_{n,y_n}, \quad
        w_{c} = \mathbb{1},

    where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
    :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then

    .. math::

        \ell(x, y) = \begin{cases}
            \sum_{n=1}^N \frac{1}{N} l_n, &
            \text{if reduction} = \text{`mean';}\\
            \sum_{n=1}^N l_n,  &
            \text{if reduction} = \text{`sum'.}
        \end{cases}

    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below). In the case of images, it computes NLL loss per-pixel.

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> import numpy as np
        >>> input = flow.Tensor(
        ...    [[-0.1664078, -1.7256707, -0.14690138],
        ...     [-0.21474946, 0.53737473, 0.99684894],
        ...     [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        >>> target = flow.Tensor(np.array([0, 1, 2]), dtype=flow.int32)
        >>> m = flow.nn.NLLLoss(reduction="none")
        >>> out = m(input, target).numpy()
        >>> print(out)
        [ 0.1664078  -0.53737473 -0.7645404 ]
        >>> m = flow.nn.NLLLoss(reduction="sum")
        >>> out = m(input, target).numpy()
        >>> print(out)
        [-1.1355073]
        >>> m = flow.nn.NLLLoss(reduction="mean")
        >>> out = m(input, target).numpy()
        >>> print(out)
        [-0.37850246]
    """

    def __init__(
        self, weight=None, ignore_index: int = None, reduction: str = "mean",
    ) -> None:
        """Raises ValueError if `weight` is given (not supported yet)."""
        super().__init__()
        # IDIOM FIX: compare against None with `is not`, not `!=`.
        if weight is not None:
            raise ValueError("Argument weight is not supported yet")
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "only 'sum', 'mean' and None supported by now"
        self.ignore_index = ignore_index
        self.reduction = reduction
        # Gathers input[n, target[n]] along the class axis (dim 1).
        self._dim_gather_op = (
            flow.builtin_op("dim_gather")
            .Input("input")
            .Input("index")
            .Output("output")
            .Attr("dim", 1)
            .Build()
        )

    def nllloss_1d(self, input, target):
        """Per-sample loss for a flat (N, C) input and (N,) target."""
        target = flow.experimental.reshape(target, (target.shape[0], 1))
        res = self._dim_gather_op(input, target)[0]
        res = flow.experimental.squeeze(res, dim=[1])
        return res

    def forward(self, input, target):
        # Supported layouts: (N, C), (N, C, H), (N, C, H, W).
        assert len(input.shape) <= 4
        assert len(target.shape) == len(input.shape) - 1
        input = input.negative()
        if len(input.shape) == 2:
            res = self.nllloss_1d(input, target)
        elif len(input.shape) == 3:
            # (N, C, H) -> (N*H, C), compute, then restore (N, H).
            b, c, h = input.shape[0], input.shape[1], input.shape[2]
            input = flow.F.transpose(input, perm=(0, 2, 1))
            input = input.reshape(shape=[-1, input.shape[2]])
            target = target.flatten()
            res = self.nllloss_1d(input, target)
            res = res.reshape((b, h))
        elif len(input.shape) == 4:
            # (N, C, H, W) -> (N*H*W, C), compute, then restore (N, H, W).
            b, c, h, w = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
            input = flow.F.transpose(input, perm=(0, 2, 3, 1))
            input = input.reshape(shape=[-1, input.shape[3]])
            target = target.flatten()
            res = self.nllloss_1d(input, target)
            res = res.reshape((b, h, w))
        else:
            # BUG FIX: the original did `raise NotImplemented`, which raises
            # a TypeError (NotImplemented is a constant, not an exception).
            raise NotImplementedError
        if self.ignore_index is not None:
            # Zero out ignored positions; `condition` is 1 for kept entries.
            zeros = flow.experimental.zeros(
                size=res.shape, dtype=res.dtype, device=res.device
            )
            condition = flow.experimental.eq(target, self.ignore_index)
            ones = flow.experimental.ones(
                size=condition.shape, dtype=condition.dtype, device=condition.device
            )
            condition = ones.sub(condition).reshape(tuple(res.shape))
            res = flow.experimental.where(condition, res, zeros)
            if self.reduction == "mean":
                # Average over the kept (non-ignored) entries only.
                res = res.sum()
                reduce_count = condition.argwhere().shape[0]
                res = flow.experimental.mul(res, 1.0 / reduce_count)
        if self.reduction == "none":
            return res
        elif self.reduction == "sum":
            return res.sum()
        else:
            return res.mean()
@oneflow_export("nn.KLDivLoss")
@experimental_api
class KLDivLoss(Module):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html?highlight=kldivloss#torch.nn.KLDivLoss

    The Kullback-Leibler divergence loss measure

    `Kullback-Leibler divergence`_ is a useful distance measure for continuous
    distributions and is often useful when performing direct regression over
    the space of (discretely sampled) continuous output distributions.

    As with :class:`~torch.nn.NLLLoss`, the `input` given is expected to contain
    *log-probabilities* and is not restricted to a 2D Tensor.

    The targets are interpreted as *probabilities* by default, but could be considered
    as *log-probabilities* with :attr:`log_target` set to ``True``.

    This criterion expects a `target` `Tensor` of the same size as the
    `input` `Tensor`.

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::

        l(x,y) = L = \{ l_1,\dots,l_N \}, \quad
        l_n = y_n \cdot \left( \log y_n - x_n \right)

    where the index :math:`N` spans all dimensions of ``input`` and :math:`L` has the same
    shape as ``input``. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then:

    .. math::

        \ell(x, y) = \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';} \\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    In default :attr:`reduction` mode ``'mean'``, the losses are averaged for each minibatch over observations
    **as well as** over dimensions. ``'batchmean'`` mode gives the correct KL divergence where losses
    are averaged over batch dimension only. ``'mean'`` mode's behavior will be changed to the same as
    ``'batchmean'`` in the next major release.

    .. _`kullback-leibler divergence`: https://en.wikipedia.org/wiki/Kullback-Leibler_divergence

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
            ``'none'``: no reduction will be applied.
            ``'batchmean'``: the sum of the output will be divided by batchsize.
            ``'sum'``: the output will be summed.
            ``'mean'``: the output will be divided by the number of elements in the output.
            Default: ``'mean'``
        log_target (bool, optional): Specifies whether `target` is passed in the log space.
            Default: ``False``

    .. note::
        :attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use
        :attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.
        In the next major release, ``'mean'`` will be changed to be the same as ``'batchmean'``.

    Shape:
        - Input: :math:`(N, *)` where :math:`*` means, any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar by default. If :attr:``reduction`` is ``'none'``, then :math:`(N, *)`,
          the same shape as the input

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor([-0.9021705, 0.08798598, 1.04686249], dtype=flow.float32)
        >>> target = flow.Tensor([1.22386942, -0.89729659, 0.01615712], dtype=flow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="none", log_target=False)
        >>> out = m(input, target)
        >>> out
        tensor([ 1.3514,  0.    , -0.0836], dtype=oneflow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="mean", log_target=False)
        >>> out = m(input, target)
        >>> out
        tensor([0.4226], dtype=oneflow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="sum", log_target=True)
        >>> out = m(input, target)
        >>> out
        tensor([5.7801], dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean", log_target: bool = False,) -> None:
        super().__init__()
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "Argument reduction only support 'sum'/'mean'/'none'/None for now!"
        self.reduction = reduction
        self.log_target = log_target

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        if self.log_target:
            # Targets already in log space: exp(t) * (t - x).
            per_elem = flow.experimental.exp(target) * (target - input)
        else:
            # Targets in probability space: t * (log(t) - x).  log(t) is nan
            # for t <= 0, so mask those positions out with zeros afterwards.
            raw = target * (flow.experimental.log(target) - input)
            zeros = flow.experimental.zeros(
                size=raw.shape,
                dtype=raw.dtype,
                device=raw.device,
            )
            positive_target = flow.experimental.gt(target, 0)
            per_elem = flow.experimental.where(positive_target, raw, zeros)
        if self.reduction == "mean":
            return flow.experimental.mean(per_elem)
        if self.reduction == "sum":
            return flow.experimental.sum(per_elem)
        return per_elem
@oneflow_export("nn.MSELoss")
@experimental_api
class MSELoss(Module):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html?highlight=mseloss#torch.nn.MSELoss

    Creates a criterion that measures the mean squared error (squared L2 norm) between
    each element in the input :math:`x` and target :math:`y`.

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::

        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \left( x_n - y_n \right)^2,

    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then:

    .. math::

        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), &  \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  &  \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The mean operation still operates over all the elements, and divides by :math:`n`.
    The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, *)` where :math:`*` means, any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor(
        ... [[-0.02557137, 0.03101675, 1.37493674],
        ... [0.25599439, -1.08372561, -0.21006816]], dtype=flow.float32)
        >>> target = flow.Tensor(
        ... [[-1.53105064, -0.68137555, 0.5931354],
        ... [-0.49158347, 0.93673637, 0.1324141]], dtype=flow.float32)
        >>> m = flow.nn.MSELoss(reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[2.2665, 0.5075, 0.6112],
                [0.5589, 4.0823, 0.1173]], dtype=oneflow.float32)
        >>> m = flow.nn.MSELoss(reduction="mean")
        >>> out = m(input, target)
        >>> out
        tensor([1.3573], dtype=oneflow.float32)
        >>> m = flow.nn.MSELoss(reduction="sum")
        >>> out = m(input, target)
        >>> out
        tensor([8.1436], dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean") -> None:
        super().__init__()
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "Argument reduction only support 'sum'/'mean'/'none'/None for now!"
        self.reduction = reduction

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Elementwise (input - target)^2, then apply the configured reduction.
        squared_error = flow.experimental.square(
            flow.experimental.sub(input, target)
        )
        if self.reduction == "mean":
            return flow.experimental.mean(squared_error)
        if self.reduction == "sum":
            return flow.experimental.sum(squared_error)
        return squared_error
@oneflow_export("nn.MarginRankingLoss")
@experimental_api
class MarginRankingLoss(Module):
    r"""Creates a criterion that measures the loss given
    inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`,
    and a label 1D mini-batch tensor :math:`y` (containing 1 or -1).

    If :math:`y = 1` then it assumed the first input should be ranked higher
    (have a larger value) than the second input, and vice-versa for :math:`y = -1`.

    The loss function for each sample in the mini-batch is:

    .. math::
        \text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin})

    Args:
        margin (float, optional): Has a default value of :math:`0`.
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``

    Shape:
        - `x1` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
        - `x2` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
        - Target: :math:`(N)`
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> import numpy as np
        >>> x1 = flow.Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=flow.float32)
        >>> x2 = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]), dtype=flow.float32)
        >>> target = flow.Tensor(np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]]), dtype=flow.float32)
        >>> m = flow.nn.MarginRankingLoss(margin =1.0, reduction="none")
        >>> out = m(x1, x2, target)
        >>> out
        tensor([[2., 1., 0.],
                [3., 0., 5.],
                [0., 0., 0.]], dtype=oneflow.float32)

        >>> m = flow.nn.MarginRankingLoss(margin = 0.3, reduction="sum")
        >>> out = m(x1, x2, target)
        >>> out
        tensor([8.2], dtype=oneflow.float32)

        >>> m = flow.nn.MarginRankingLoss(margin = 10, reduction="mean")
        >>> out = m(x1, x2, target)
        >>> out
        tensor([8.3333], dtype=oneflow.float32)

    """

    def __init__(self, margin=0.0, reduction: str = "mean") -> None:
        super().__init__()
        self.margin = margin
        # BUGFIX: the old assertion message omitted "none" even though it is
        # an accepted value; message now matches MSELoss and the list below.
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "Argument reduction only support 'sum'/'mean'/'none'/None for now!"
        self.reduction = reduction

    def forward(self, input1, input2, target):
        # Elementwise: max(0, -target * (input1 - input2) + margin).
        res = flow.experimental.clip(
            flow.experimental.add(
                self.margin,
                flow.experimental.mul(
                    target,
                    flow.experimental.mul(-1, flow.experimental.sub(input1, input2)),
                ),
            ),
            min=0.0,
        )
        if self.reduction == "none":
            return res
        elif self.reduction == "sum":
            return res.sum()
        else:
            # "mean" (and None) both fall through to mean reduction.
            return res.mean()
@oneflow_export("nn.CTCLoss")
@experimental_api
class CTCLoss(Module):
    r"""The Connectionist Temporal Classification loss.

    The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.CTCLoss.html#torch.nn.CTCLoss

    Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
    probability of possible alignments of input to target, producing a loss value which is differentiable
    with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
    limits the length of the target sequence such that it must be :math:`\leq` the input length.

    Args:
        blank (int, optional): blank label. Default :math:`0`.
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: ``'mean'``
        zero_infinity (bool, optional):
            Whether to zero infinite losses and the associated gradients.
            Default: ``False``
            Infinite losses mainly occur when the inputs are too short
            to be aligned to the targets.

    Shape:
        - Log_probs: Tensor of size :math:`(T, N, C)`,
          where :math:`T = \text{input length}`,
          :math:`N = \text{batch size}`, and
          :math:`C = \text{number of classes (including blank)}`.
        - Targets: Tensor of size :math:`(N, S)` or
          :math:`(\operatorname{sum}(\text{target\_lengths}))`,
          where :math:`N = \text{batch size}` and
          :math:`S = \text{max target length, if shape is } (N, S)`.
          It represent the target sequences. Each element in the target
          sequence is a class index. And the target index cannot be blank (default=0).
          In the :math:`(N, S)` form, targets are padded to the
          length of the longest sequence, and stacked.
          In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form,
          the targets are assumed to be un-padded and
          concatenated within 1 dimension.
        - Input_lengths: Tuple or tensor of size :math:`(N)`,
          where :math:`N = \text{batch size}`. It represent the lengths of the
          inputs (must each be :math:`\leq T`). And the lengths are specified
          for each sequence to achieve masking under the assumption that sequences
          are padded to equal lengths.
        - Target_lengths: Tuple or tensor of size :math:`(N)`,
          where :math:`N = \text{batch size}`. It represent lengths of the targets.
          Lengths are specified for each sequence to achieve masking under the
          assumption that sequences are padded to equal lengths. If target shape is
          :math:`(N,S)`, target_lengths are effectively the stop index
          :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
          each target in a batch. Lengths must each be :math:`\leq S`
          If the targets are given as a 1d tensor that is the concatenation of individual
          targets, the target_lengths must add up to the total length of the tensor.

    Reference:
        A. Graves et al.: Connectionist Temporal Classification:
        Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
        https://www.cs.toronto.edu/~graves/icml_2006.pdf

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> import numpy as np
        >>> log_probs = np.array(
        ...     [
        ...         [[-1.1031, -0.7998, -1.5200], [-0.9808, -1.1363, -1.1908]],
        ...         [[-1.2258, -1.0665, -1.0153], [-1.1135, -1.2331, -0.9671]],
        ...         [[-1.3348, -0.6611, -1.5118], [-0.9823, -1.2355, -1.0941]],
        ...         [[-1.3850, -1.3273, -0.7247], [-0.8235, -1.4783, -1.0994]],
        ...         [[-0.9049, -0.8867, -1.6962], [-1.4938, -1.3630, -0.6547]],
        ...     ]
        ... ).astype(np.float32)
        >>> log_probs = flow.Tensor(log_probs, dtype=flow.float32)
        >>> targets = flow.Tensor(np.array([[1, 2, 2], [1, 2, 2]]).astype("int32"), dtype=flow.int32)
        >>> input_lengths = flow.Tensor(np.array([5, 5]).astype("int32"), dtype=flow.int32)
        >>> target_lengths = flow.Tensor(np.array([3, 3]).astype("int32"), dtype=flow.int32)
        >>> loss_mean = flow.nn.CTCLoss()
        >>> out = loss_mean(log_probs, targets, input_lengths, target_lengths)
        >>> out
        tensor([1.1376], dtype=oneflow.float32)
        >>> loss_sum = flow.nn.CTCLoss(blank=0, reduction="sum")
        >>> out = loss_sum(log_probs, targets, input_lengths, target_lengths)
        >>> out
        tensor([6.8257], dtype=oneflow.float32)
        >>>

    """

    def __init__(
        self, blank: int = 0, reduction: str = "mean", zero_infinity: bool = False,
    ) -> None:
        super().__init__()
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "only 'sum', 'mean' and None supported by now"
        self.reduction = reduction
        self.zero_infinity = zero_infinity
        # Raw CTC op. Note it produces a second output "alpha" (forward
        # variables of the CTC lattice) which forward() discards.
        self._op = (
            flow.builtin_op("ctc_loss")
            .Input("log_probs")
            .Input("targets")
            .Input("input_lengths")
            .Input("target_lengths")
            .Output("loss")
            .Output("alpha")
            .Attr("blank", int(blank))
            .Attr("zero_infinity", zero_infinity)
            .Build()
        )
        # Elementwise x / y op, used in forward() to divide each sample's
        # loss by its target length for the "mean" reduction.
        self._xdivy_op = (
            flow.builtin_op("xdivy").Input("x").Input("y").Output("z").Build()
        )
        # Constant-tensor module class; instantiated in forward() to build
        # an all-inf tensor for the zero_infinity comparison.
        self.constant = _ConstantBase

    def forward(
        self,
        log_probs: Tensor,
        targets: Tensor,
        input_lengths: Tensor,
        target_lengths: Tensor,
    ) -> Tensor:
        loss, _ = self._op(log_probs, targets, input_lengths, target_lengths)
        if self.zero_infinity:
            # Replace infinite per-sample losses with zero.  Infinities
            # arise when an input is too short to align with its target.
            cond = flow.experimental.eq(
                loss,
                self.constant(
                    size=loss.shape,
                    value=float("inf"),
                    dtype=loss.dtype,
                    device=loss.device,
                )(),
            )
            loss = flow.experimental.where(
                cond,
                flow.experimental.zeros(
                    size=loss.shape, dtype=loss.dtype, device=loss.device
                ),
                loss,
            )
        if self.reduction == "mean":
            # Normalize each loss by its target length (clamped to >= 1 to
            # avoid division by zero), then average over the batch.
            return flow.experimental.mean(
                self._xdivy_op(
                    loss,
                    flow.experimental.cast(
                        flow.experimental.clamp(target_lengths, min=1),
                        dtype=log_probs.dtype,
                    ),
                )[0]
            )
        elif self.reduction == "sum":
            return flow.experimental.sum(loss)
        else:
            # "none" / None: unreduced per-sample losses.
            return loss
@oneflow_export("nn.BCEWithLogitsLoss")
@experimental_api
class BCEWithLogitsLoss(Module):
    r"""This operator combines the `Sigmoid` and `BCELoss` together. For numerical stability,
    we apply some math tricks instead of using `Sigmoid` layer with `BCELoss`.

    The equation is:

    if :attr:`reduction` = ``"none"``:

    .. math::

        out = -weight*[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]

    if :attr:`reduction` = ``"mean"``:

    .. math::

        out = -\frac{weight}{n}\sum_{i=1}^n[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]

    if :attr:`reduction` = ``"sum"``:

    .. math::

        out = -weight*\sum_{i=1}^n[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]

    Args:
        weight (Tensor, optional): The manual rescaling weight to the loss. Default: ``None``
        size_average (bool, optional) – Deprecated (see :attr:`reduction`). Default: ``True``
        reduce (bool, optional) – Deprecated (see :attr:`reduction`). Default: ``True``
        reduction (str, optional): The reduce type, it can be one of ``"none"``, ``"mean"``, ``"sum"``.
            ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided
            by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``"mean"``
        pos_weight (Tensor, optional): The manual rescaling weight to the positive examples.
            Default: ``None``

    Shape:
        - Input: :math:`(N,*)` where `*` means, any number of additional dimensions
        - Target: :math:`(N,*)`, same shape as the input
        - Output: scalar. If :attr:`reduction` is ``"none"``, then :math:`(N,*)`, same shape as input.

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> import oneflow.typing as tp
        >>> input = flow.Tensor([[1.2, 0.2, -0.3], [0.7, 0.6, -2], [0.7, 0.6, -2]], dtype=flow.float32)
        >>> target = flow.Tensor([[0, 1, 0], [1, 0, 1], [1, 0, 1]], dtype=flow.float32)
        >>> weight = flow.Tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=flow.float32)
        >>> pos_weight = flow.Tensor([1.2, 1.3, 1.4], dtype=flow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[2.9266, 1.5552, 1.1087],
                [0.9676, 2.075 , 5.9554],
                [0.9676, 2.075 , 5.9554]], dtype=oneflow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="mean")
        >>> out = m(input, target)
        >>> out
        tensor([2.6207], dtype=oneflow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="sum")
        >>> out = m(input, target)
        >>> out
        tensor([23.5865], dtype=oneflow.float32)

    """

    def __init__(
        self,
        weight=None,
        size_average: bool = True,
        reduce: bool = True,
        reduction: Optional[str] = "mean",
        pos_weight=None,
    ) -> None:
        super().__init__()
        # BUGFIX: message now lists all accepted values (the old one omitted
        # "none"); kept consistent with MSELoss.
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "Argument reduction only support 'sum'/'mean'/'none'/None for now!"
        self.weight = weight
        self.size_average = size_average
        self.reduce = reduce
        self.reduction = reduction
        self.pos_weight = pos_weight

    def forward(self, input, target):
        if not (target.shape == input.shape):
            raise ValueError(
                "Target size ({}) must be the same as input size ({})".format(
                    target.size(), input.size()
                )
            )
        # Numerically stable formulation: with m = max(0, -x),
        # log(sigmoid(x)) = -(log(exp(-m) + exp(-x - m)) + m).
        _neg_input = flow.experimental.negative(input)
        _max_val = flow.experimental.clip(_neg_input, 0)
        _neg_max_val = flow.experimental.negative(_max_val)

        # BUGFIX: compare against None instead of Tensor truthiness.  The old
        # ``if self.pos_weight:`` evaluated the tensor's truth value (wrong or
        # undefined for multi-element tensors); also matches the explicit
        # ``is not None`` check used for ``self.weight`` below.
        if self.pos_weight is not None:
            _log_weight = ((self.pos_weight - 1) * target) + 1
            _loss = (1 - target) * input + _log_weight * (
                flow.experimental.log(
                    flow.experimental.exp(_neg_max_val)
                    + flow.experimental.exp(_neg_input - _max_val)
                )
                + _max_val
            )
        else:
            _loss = (1 - target) * input + _max_val
            _loss += flow.experimental.log(
                flow.experimental.exp(_neg_max_val)
                + flow.experimental.exp(_neg_input - _max_val)
            )

        if self.weight is not None:
            assert (
                self.weight.shape == input.shape
            ), "The weight shape must be the same as Input shape"
            _weighted_loss = self.weight * _loss
        else:
            _weighted_loss = _loss

        if self.reduction == "mean":
            return flow.experimental.mean(_weighted_loss)
        elif self.reduction == "sum":
            return flow.experimental.sum(_weighted_loss)
        else:
            # Do no reduction
            return _weighted_loss
if __name__ == "__main__":
    import doctest

    # Execute the doctest examples embedded in the docstrings above;
    # raise_on_error aborts on the first failing example.
    doctest.testmod(raise_on_error=True)
| 37.732444
| 152
| 0.568541
|
4a0895a825e3ddf0320a9158350e2019404fcde2
| 796
|
py
|
Python
|
remove_duplicates.py
|
raajatk/PythonCodingPractice
|
55de772fd7515c70ffd7bda0d967096db82c5127
|
[
"MIT"
] | null | null | null |
remove_duplicates.py
|
raajatk/PythonCodingPractice
|
55de772fd7515c70ffd7bda0d967096db82c5127
|
[
"MIT"
] | null | null | null |
remove_duplicates.py
|
raajatk/PythonCodingPractice
|
55de772fd7515c70ffd7bda0d967096db82c5127
|
[
"MIT"
] | null | null | null |
"""Write a program (function!) that takes a list and returns a new list that
contains all the elements of the first list minus all the duplicates.
Write two different functions to do this - one using a loop and constructing
a list, and another using sets.
"""
def remove_duplicates_using_loop(list):
    """Return a new list with the duplicates of *list* removed.

    The first occurrence of each element is kept, so the input's relative
    order is preserved.  Membership is checked against the output list
    itself, which keeps unhashable elements (e.g. nested lists) supported
    at the cost of O(n^2) worst-case time.
    """
    # NOTE: the parameter name shadows the builtin ``list``; it is kept
    # unchanged for backward compatibility with existing callers.
    unique_items = []
    for item in list:  # iterate elements directly instead of range(len(...))
        if item not in unique_items:
            unique_items.append(item)
    return unique_items
def remove_duplicates_using_set(list):
    """Return the distinct elements of *list* via a set (order arbitrary).

    Elements must be hashable.  A comprehension is used instead of the
    builtin ``list()`` constructor because the parameter shadows that name.
    """
    return [element for element in set(list)]
# Script entry: read a comma-separated list of integers from stdin and show
# both de-duplication strategies.
# NOTE(review): this variable shadows the builtin ``list`` at module level;
# the helper functions are unaffected because they use their own parameter.
list = [int(i) for i in input("Please input the list(elements separated by commas(,))\n").split(',')]
print("The output using sets ",remove_duplicates_using_set(list))
print("The output using loops ",remove_duplicates_using_loop(list))
| 33.166667
| 101
| 0.71608
|
4a0895ec180c56431801e1612d20b60279c52193
| 6,290
|
py
|
Python
|
server/api/views/accounts.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | 4
|
2021-12-22T18:07:10.000Z
|
2021-12-29T09:22:44.000Z
|
server/api/views/accounts.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | null | null | null |
server/api/views/accounts.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | null | null | null |
from api.serializers.accounts import (
AccountListSerializer,
AccountSerializer,
ActionCreationSerializer,
ActionListSerializer,
ActionSerializer,
BusinessListSerializer,
BusinessSerializer,
UserListSerializer,
UserSerializer,
WorkspaceCreationSerializer,
WorkspaceListSerializer,
WorkspaceSerializer,
)
from api.views import BaseAPIViewSet
from api.views.mixins import (
ListViewSetMixin,
ReadOnlyWithNoFiltersViewSetMixin,
RetrieveViewSetMixin,
)
from common.utils import DecoratorShipper as Decorators
from common.utils import ModelManager
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED
class WorkspaceViewSet(ListViewSetMixin, RetrieveViewSetMixin, BaseAPIViewSet):
    """API endpoints for Workspace: list, create and retrieve."""

    app_model_name = "accounts.workspace"
    ListSerializer = WorkspaceListSerializer
    RetrieveSerializer = WorkspaceSerializer

    @Decorators.with_permission("accounts.view_workspacemodel")
    def list(self, request):
        # Restrict the listing to workspaces the requesting user belongs to.
        return super().list(request, qs=request.user.workspaces.all())

    @Decorators.with_permission("accounts.add_workspacemodel")
    def create(self, request):
        incoming = WorkspaceCreationSerializer(data=request.data)
        incoming.is_valid(raise_exception=True)
        workspace = incoming.save()
        return Response(
            WorkspaceSerializer(workspace).data, status=HTTP_201_CREATED
        )

    @Decorators.with_permission("accounts.view_workspacemodel")
    def retrieve(self, request, uuid):
        return super().retrieve(request, uuid=uuid)
class AccountViewSet(RetrieveViewSetMixin, BaseAPIViewSet):
    """
    API CRUD for Account, it implements: list and retrieve methods.
    """

    app_model_name = "accounts.account"
    RetrieveSerializer = AccountSerializer
    VALID_FILTERS = ("is_staff", "is_active", "related_workspace")

    @Decorators.with_permission("accounts.view_accountmodel")
    def list(self, request):
        """List accounts, optionally filtered.

        Supported query params: ``is_staff``/``is_active`` (the literal
        string "true" enables the flag), ``related_workspace`` (workspace
        UUID restricting results to its accounts), plus ``limit``/``offset``
        for pagination.
        """
        filters = {}
        initial_queryset = None
        for key, value in request.query_params.items():
            self.check_filter(key)  # raises on unknown filter keys
            if key not in ["limit", "offset"]:
                if key in ["is_staff", "is_active"]:
                    filters[key] = value == "true"
                elif key == "related_workspace":
                    workspace_uuid = request.query_params.get(
                        "related_workspace"
                    )
                    try:
                        workspace = ModelManager.handle(
                            "accounts.workspace", "get", uuid=workspace_uuid
                        )
                    except ObjectDoesNotExist:
                        raise ParseError(
                            "Workspace '%s' doesn't exist." % workspace_uuid
                        )
                    initial_queryset = workspace.accounts.all()
        # BUGFIX: compare against None instead of truthiness.  An EMPTY
        # workspace queryset is falsy, so filtering by a workspace with no
        # accounts previously fell through and listed ALL accounts.
        if initial_queryset is not None:
            accounts = initial_queryset.filter(**filters)
        else:
            accounts = ModelManager.handle(
                "accounts.account", "filter", **filters
            )
        items, metadata = self.filter_paginated_results(request, accounts)
        list_serializer = AccountListSerializer(
            items, many=True, request=request
        )
        response_data = {
            "total_queryset": len(accounts),
            "count": len(items),
            "data": list_serializer.data,
            "metadata": {**metadata, **{"valid_filters": self.VALID_FILTERS}},
        }
        return Response(response_data, status=HTTP_200_OK)

    @Decorators.with_permission("accounts.view_accountmodel")
    def retrieve(self, request, uuid):
        return super().retrieve(request, uuid=uuid)
class BusinessViewSet(ReadOnlyWithNoFiltersViewSetMixin, BaseAPIViewSet):
    """
    API CRUD for Business.
    It implements only list and retrieve methods without listing filters.
    """

    app_model_name = "accounts.business"
    ListSerializer = BusinessListSerializer
    RetrieveSerializer = BusinessSerializer

    # The overrides below exist solely to attach permission checks; the
    # actual behavior is inherited from the mixins.
    @Decorators.with_permission("accounts.view_businessmodel")
    def list(self, request):
        return super().list(request)

    @Decorators.with_permission("accounts.view_businessmodel")
    def retrieve(self, request, uuid):
        return super().retrieve(request, uuid=uuid)
class UserViewSet(ReadOnlyWithNoFiltersViewSetMixin, BaseAPIViewSet):
    """
    API CRUD for User.
    It implements only list and retrieve methods without listing filters.
    """

    app_model_name = "accounts.user"
    ListSerializer = UserListSerializer
    RetrieveSerializer = UserSerializer

    # The overrides below exist solely to attach permission checks; the
    # actual behavior is inherited from the mixins.
    @Decorators.with_permission("accounts.view_usermodel")
    def list(self, request):
        return super().list(request)

    @Decorators.with_permission("accounts.view_usermodel")
    def retrieve(self, request, uuid):
        return super().retrieve(request, uuid=uuid)
class ActionViewSet(ListViewSetMixin, RetrieveViewSetMixin, BaseAPIViewSet):
    """API endpoints for Actions: list, create and retrieve."""

    app_model_name = "accounts.action"
    ListSerializer = ActionListSerializer
    RetrieveSerializer = ActionSerializer
    VALID_FILTERS = ("kind", "level")

    @Decorators.with_permission("accounts.view_actionmodel")
    def list(self, request):
        # Only the requesting user's own actions are listed.
        return super().list(request, qs=request.user.actions.all())

    @Decorators.with_permission("accounts.add_actionmodel")
    def create(self, request):
        incoming = ActionCreationSerializer(data=request.data)
        incoming.is_valid(raise_exception=True)
        action = incoming.save(account=request.user)
        return Response(ActionSerializer(action).data, status=HTTP_201_CREATED)

    @Decorators.with_permission("accounts.view_actionmodel")
    def retrieve(self, request, uuid):
        return super().retrieve(request, uuid=uuid)
| 34.56044
| 79
| 0.681558
|
4a0896c0400d018ef7aedfc0bfbabfbff443b629
| 4,094
|
py
|
Python
|
unit_tests/test_cli_alias_add.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/test_cli_alias_add.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/test_cli_alias_add.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | null | null | null |
from unit_test_common import execute_csv2_command, initialize_csv2_request, ut_id, sanity_commands, parameters_commands
from sys import argv
# lno: AV - error code identifier.
def main(gvar):
    """Exercise ``cloud alias add``: sanity checks, invalid-parameter cases
    (each pinned to its exact CLI error message), and successful creation of
    single- and multi-cloud aliases."""
    if not gvar:
        gvar = {}
        if len(argv) > 1:
            initialize_csv2_request(gvar, selections=argv[1])
        else:
            initialize_csv2_request(gvar)

    # 01 - 13
    sanity_commands(gvar, 'alias')

    # 14 - 27
    sanity_commands(gvar, 'alias', 'add')

    # Maps each CLI flag to a valid value plus invalid test cases keyed by
    # input value, with the exact error message the CLI must emit.
    parameters = {
        # 28 Omit name.
        '--cloud-name': {'valid': ut_id(gvar, 'clc2'), 'test_cases': {
            # 29
            '': 'cloud alias add, value specified for "cloud_name" must not be the empty string.',
            # 30
            'Invalid-Unit-Test': 'cloud alias add, value specified for "cloud_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 31
            'invalid-unit--test': 'cloud alias add, value specified for "cloud_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 32
            'invalid-unit-test-': 'cloud alias add, value specified for "cloud_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 33
            'invalid-unit-test!': 'cloud alias add, value specified for "cloud_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 34 Comma is used for a list of clouds, skip this test
            #'invalid,unit,test': 'cloud alias add, value specified for "cloud_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 35 Specify a cloud that does not exist.
            'invalid-unit-test': 'cloud alias add, "invalid-unit-test" failed - specified value in list of values does not exist: cloud_name=invalid-unit-test, group_name={}.'.format(ut_id(gvar, 'clg1'))
        }, 'mandatory': True},
        # 36 Omit alias.
        '--alias-name': {'valid': 'invalid-unit-test', 'test_cases': {
            # 37
            '': 'cloud alias add, value specified for "alias_name" must not be the empty string.',
            # 38
            'invalid-unit-test!': 'cloud alias add, value specified for "alias_name" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.',
            # 39
            'alias-name-that-is-too-long-for-the-database': 'Data too long for column \'alias_name\' at row 1',
            # 40 Attempt to create an alias that already exists.
            ut_id(gvar, 'cla1'): 'cloud alias add "{}.{}" failed - specified alias already exists.'.format(ut_id(gvar, 'clg1'), ut_id(gvar, 'cla1'))
        }, 'mandatory': True},
    }

    parameters_commands(gvar, 'alias', 'add', ut_id(gvar, 'clg1'), ut_id(gvar, 'clu3'), parameters)

    # 41 Create an alias properly.
    execute_csv2_command(
        gvar, 0, None, 'cloud alias "{}.{}" successfully added.'.format(ut_id(gvar, 'clg1'), ut_id(gvar, 'cla3')),
        ['alias', 'add', '-g', ut_id(gvar, 'clg1'), '--alias-name', ut_id(gvar, 'cla3'), '--cloud-name', ut_id(gvar, 'clc2'), '-su', ut_id(gvar, 'clu3')],
    )

    # 42 Create an alias has multiple clouds
    execute_csv2_command(
        gvar, 0, None, 'cloud alias "{}.{}" successfully added.'.format(ut_id(gvar, 'clg1'), ut_id(gvar, 'cla4')),
        ['alias', 'add', '-g', ut_id(gvar, 'clg1'), '--alias-name', ut_id(gvar, 'cla4'), '--cloud-name', '{},{}'.format(ut_id(gvar, 'clc2'), ut_id(gvar, 'clc1')), '-su', ut_id(gvar, 'clu3')],
    )
if __name__ == "__main__":
    # Allow running this unit-test module directly from the command line.
    main(None)
| 60.205882
| 246
| 0.636541
|
4a0897e5dc85c3370ec9e12013664205181567a8
| 10,600
|
py
|
Python
|
features/steps/create_report_test_after_user_deleted.py
|
profesormig/quimica3a
|
a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4
|
[
"BSD-3-Clause"
] | null | null | null |
features/steps/create_report_test_after_user_deleted.py
|
profesormig/quimica3a
|
a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4
|
[
"BSD-3-Clause"
] | null | null | null |
features/steps/create_report_test_after_user_deleted.py
|
profesormig/quimica3a
|
a453f0d7485ebc4b2d7b06a72b44c6c179a3bbd4
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import uuid
from datetime import timedelta
from behave import *
from django.db.models import Sum
from api.tests.factories import (
UserFactory, InstanceFactory, IdentityFactory, InstanceStatusFactory,
ProviderFactory, ProviderMachineFactory, InstanceHistoryFactory)
from core.models import *
from core.models.allocation_source import total_usage
from jetstream.exceptions import TASPluginException
from jetstream.models import *
logger = logging.getLogger(__name__)
@given('a test Allocation Source')
def step_impl(context):
    """Create the Allocation Source under test and record the start time."""
    context.current_time = timezone.now()
    source_name = "testSource"
    compute_allowed = 1000
    context.allocation_source = AllocationSource.objects.create(
        name=source_name, compute_allowed=compute_allowed
    )
    # Sanity check: the source must be queryable right after creation.
    assert len(AllocationSource.objects.filter(name=source_name)) > 0
@when('Allocation Source is assigned to Users')
def step_impl(context):
    """Create the requested number of users and attach each to the source."""
    context.users = []
    for row in context.table:
        user_count = int(row['number of users assigned to allocation source'])
        context.number_of_users = user_count
        for _ in range(user_count):
            user = UserFactory.create(date_joined=context.current_time)
            context.users.append(user)
            UserAllocationSource.objects.create(
                allocation_source=context.allocation_source, user=user
            )
            # Each assignment must be visible through the ORM immediately.
            assert len(
                UserAllocationSource.objects.filter(
                    user=user, allocation_source=context.allocation_source
                )
            ) > 0
@when('All Users run an instance on Allocation Source for indefinite duration')
def step_impl(context):
    """Launch one instance per user and fire the allocation-change event."""
    for row in context.table:
        cpu_size = int(row['cpu size of instance'])
        context.cpu_size = cpu_size
        for user in context.users:
            alias = launch_instance(user, context.current_time, cpu_size)
            EventTable.objects.create(
                name="instance_allocation_source_changed",
                payload={
                    "instance_id": str(alias),
                    "allocation_source_name": context.allocation_source.name,
                },
                entity_id=user.username,
                timestamp=context.current_time,
            )
            # Exactly one status-history row per freshly launched instance.
            assert len(InstanceStatusHistory.objects.filter(instance__created_by=user)) == 1
@when('create_reports task is run for the first time')
def step_impl(context):
    """Run the reporting task once and verify the generated usage totals."""
    # NOTE: only the (last) table row's interval is used; the loop body ends
    # after the two assignments below.
    for row in context.table:
        interval_time = int(row['task runs every x minutes'])
        context.interval_time = interval_time
    report_end_date = context.current_time + timedelta(minutes=interval_time)
    create_reports(end_date=report_end_date)
    assert (len(TASAllocationReport.objects.all()) > 0)
    assert (TASAllocationReport.objects.last().end_date == report_end_date)
    assert (TASAllocationReport.objects.last().start_date == context.current_time)
    # Expected usage (minutes) = cpu size * interval minutes * user count.
    expected_initial_usage = context.cpu_size * context.interval_time * context.number_of_users
    # compute_used is presumably stored in hours; * 60 converts to minutes
    # -- TODO confirm against TASAllocationReport semantics.
    calculated_initial_usage = float(
        TASAllocationReport.objects.filter(project_name=context.allocation_source.name).aggregate(Sum('compute_used'))[
            'compute_used__sum']) * 60
    assert (round(calculated_initial_usage, 2) == expected_initial_usage)
    # Advance the scenario clock to the end of this reporting interval.
    context.current_time = context.current_time + timedelta(minutes=interval_time)
@when('Users are deleted from Allocation Source after first create_reports run')
def step_impl(context):
    """Emit deletion events for the first N users after the given delay."""
    for row in context.table:
        delete_count = int(row['number of users deleted from allocation source'])
        delay_minutes = int(row['users deleted x minutes after the first create_reports run'])
    # The (last) table row's values drive the deletions below -- note the
    # loop over users runs AFTER the table loop, matching the original flow.
    for index in range(delete_count):
        user = context.users[index]
        EventTable.objects.create(
            payload={"allocation_source_name": context.allocation_source.name},
            name="user_allocation_source_deleted",
            entity_id=user.username,
            timestamp=context.current_time + timedelta(minutes=delay_minutes),
        )
        # The event listener must have removed the user/source mapping.
        assert len(
            UserAllocationSource.objects.filter(
                user=user, allocation_source=context.allocation_source
            )
        ) == 0
@then(
    'Total expected allocation usage for allocation source matches calculated allocation usage from reports after next create_reports run')
def step_impl(context):
    """Run create_reports a second time and compare usage with expectation."""
    # Only the (last) table row's expected value is used; the loop body ends
    # after the assignment below.
    for row in context.table:
        total_expected_usage = int(row['total expected allocation usage in minutes'])
    report_end_date = context.current_time + timedelta(minutes=context.interval_time)
    create_reports(end_date=report_end_date)
    # One report per user per run; two runs have now completed.
    assert (len(TASAllocationReport.objects.all()) == 2 * context.number_of_users)
    assert (TASAllocationReport.objects.last().start_date == context.current_time)
    # compute_used is presumably stored in hours; * 60 converts to minutes
    # -- TODO confirm against TASAllocationReport semantics.
    calculated_initial_usage = float(
        TASAllocationReport.objects.filter(project_name=context.allocation_source.name).aggregate(Sum('compute_used'))[
            'compute_used__sum']) * 60
    logging.info("\n\n expected:%s actual:%s \n\n" % (total_expected_usage, int(calculated_initial_usage)))
    # just for the purpose of these test cases, we require time in minutes
    # conversion from microseconds to hours and then hours to minutes with rounding results in loss of time
    # therefore instead of comparing exact values, we check if the difference is not more than a minute (or two)
    assert (abs(total_expected_usage - int(calculated_initial_usage)) < 2)
#### Helpers ####
def launch_instance(user, time_created, cpu):
    """Create an 'active' instance (with one status-history row) for *user*.

    Reuses the most recent Identity / ProviderMachine when one already
    exists, so repeated launches share infrastructure objects.  Returns the
    new instance's provider alias.
    """
    provider = ProviderFactory.create()
    from core.models import IdentityMembership, Identity
    user_group = IdentityMembership.objects.filter(member__name=user.username)
    if not user_group:
        user_identity = IdentityFactory.create_identity(
            created_by=user,
            provider=provider)
    else:
        user_identity = Identity.objects.all().last()
    # (removed dead local ``admin_identity = user_identity`` -- never used)
    provider_machine = ProviderMachine.objects.all()
    if not provider_machine:
        machine = ProviderMachineFactory.create_provider_machine(user, user_identity)
    else:
        machine = ProviderMachine.objects.all().last()
    status = InstanceStatusFactory.create(name='active')
    instance_state = InstanceFactory.create(
        provider_alias=uuid.uuid4(),
        source=machine.instance_source,
        created_by=user,
        created_by_identity=user_identity,
        start_date=time_created)
    size = Size(alias=uuid.uuid4(), name='small', provider=provider, cpu=cpu, disk=1, root=1, mem=1)
    size.save()
    # One 30-minute 'active' period starting at the launch time.
    InstanceHistoryFactory.create(
        status=status,
        activity="",
        instance=instance_state,
        start_date=time_created,
        end_date=time_created + timedelta(minutes=30),
        size=size
    )
    return instance_state.provider_alias
def create_reports(end_date=False):
    """Create TAS allocation reports for every current user/source pair.

    Also emits closing reports for users removed from a source since the
    last report run (``user_allocation_source_deleted`` events), using the
    event timestamp as that report's end date.

    Args:
        end_date: report cut-off; defaults to ``timezone.now()`` when falsy.

    Returns:
        list of the created report objects.
    """
    user_allocation_list = UserAllocationSource.objects.all()
    all_reports = []
    if not end_date:
        end_date = timezone.now()
    # Determine when reporting last ran; deletion events after this point
    # still need a closing report even though the mapping row is gone.
    last_report_date = TASAllocationReport.objects.order_by('end_date')
    if not last_report_date:
        last_report_date = end_date
    else:
        last_report_date = last_report_date.last().end_date
    for item in user_allocation_list:
        allocation_name = item.allocation_source.name
        # CHANGED LINE
        project_report = _create_reports_for(item.user, allocation_name, end_date)
        if project_report:
            all_reports.append(project_report)
    # Take care of Deleted Users
    # filter user_allocation_source_removed events which are created after the last report date
    for event in EventTable.objects.filter(name="user_allocation_source_deleted", timestamp__gte=last_report_date).order_by('timestamp'):
        user = AtmosphereUser.objects.get(username=event.entity_id)
        allocation_name = event.payload['allocation_source_name']
        # The deletion moment closes this user's usage window.
        end_date = event.timestamp
        project_report = _create_reports_for(user, allocation_name, end_date)
        if project_report:
            all_reports.append(project_report)
    return all_reports
def _create_reports_for(user, allocation_name, end_date):
    """Create one TAS report for *user* on *allocation_name* ending at *end_date*.

    Returns the report, or ``None`` when the username is missing or report
    creation raised ``TASPluginException`` (which is logged, not re-raised).
    """
    # NOTE(review): ``driver`` appears unused -- the TACC lookups below are
    # stubbed to local values; confirm the constructor has no required side
    # effects before removing it.
    driver = TASAPIDriver()
    tacc_username = user.username  # driver.get_tacc_username(user)
    if not tacc_username:
        logger.error("No TACC username for user: '{}' which came from allocation id: {}".format(user,
                                                                                                allocation_name))
        return
    project_name = allocation_name  # driver.get_allocation_project_name(allocation_name)
    try:
        project_report = _create_tas_report_for(
            user,
            tacc_username,
            project_name,
            end_date)
        return project_report
    except TASPluginException:
        logger.exception(
            "Could not create the report because of the error below"
        )
        return
def _create_tas_report_for(user, tacc_username, tacc_project_name, end_date):
"""
Create a new report
"""
if not end_date:
raise TASPluginException("Explicit end date required")
if not user:
raise TASPluginException("User missing")
if not tacc_username:
raise TASPluginException("TACC Username missing")
if not tacc_project_name:
raise TASPluginException("OpenStack/TACC Project missing")
last_report = TASAllocationReport.objects.filter(
project_name=tacc_project_name,
user=user
).order_by('end_date').last()
if not last_report:
start_date = user.date_joined
else:
start_date = last_report.end_date
compute_used = total_usage(
user.username, start_date,
allocation_source_name=tacc_project_name,
end_date=end_date)
if compute_used < 0:
raise TASPluginException(
"Compute usage was not accurately calculated for user:%s for start_date:%s and end_date:%s"
% (user, start_date, end_date))
new_report = TASAllocationReport.objects.create(
user=user,
username=tacc_username,
project_name=tacc_project_name,
compute_used=compute_used,
start_date=start_date,
end_date=end_date,
tacc_api=settings.TACC_API_URL)
logger.info("Created New Report:%s" % new_report)
return new_report
| 38.129496
| 139
| 0.707075
|
4a0899af3da8c1dec08eb3f670ffd9a2ba36f851
| 1,129
|
py
|
Python
|
var/spack/repos/builtin/packages/py-azure-storage-blob/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-azure-storage-blob/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-azure-storage-blob/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureStorageBlob(PythonPackage):
"""Microsoft Azure Blob Storage Client Library for Python"""
homepage = "https://github.com/Azure/azure-storage-python"
pypi = "azure-storage-blob/azure-storage-blob-12.9.0.zip"
maintainers = ['marcusboden']
version('12.9.0', sha256='cff66a115c73c90e496c8c8b3026898a3ce64100840276e9245434e28a864225')
depends_on('py-setuptools', type='build')
depends_on('py-azure-core@1.10:1', type=('build', 'run'))
depends_on('py-msrest@0.6.21:', type=('build', 'run'))
depends_on('py-cryptography@2.1.4:', type=('build', 'run'))
depends_on('py-futures', type=('build', 'run'), when='^python@:2')
depends_on('py-azure-storage-nspkg@3', type=('build', 'run'), when='^python@:2')
depends_on('py-enum34@1.0.4:', type=('build', 'run'), when=('^python@:3.3'))
depends_on('py-typing', type=('build', 'run'), when=('^python@:3.4'))
| 40.321429
| 96
| 0.678477
|
4a0899f9c7d47cd453363c46907f09a1c7e0f3dd
| 3,787
|
py
|
Python
|
seq2seq/beam.py
|
wangqiaowen/atmt_ex04
|
e83592496dc4eba9826c6c39f768aba5097e70fa
|
[
"MIT"
] | null | null | null |
seq2seq/beam.py
|
wangqiaowen/atmt_ex04
|
e83592496dc4eba9826c6c39f768aba5097e70fa
|
[
"MIT"
] | null | null | null |
seq2seq/beam.py
|
wangqiaowen/atmt_ex04
|
e83592496dc4eba9826c6c39f768aba5097e70fa
|
[
"MIT"
] | null | null | null |
import torch
from itertools import count
from queue import PriorityQueue
import numpy as np
class BeamSearch(object):
""" Defines a beam search object for a single input sentence. """
def __init__(self, beam_size, max_len, pad):
self.beam_size = beam_size
self.max_len = max_len
self.pad = pad
self.nodes = PriorityQueue() # beams to be expanded
self.final = PriorityQueue() # beams that ended in EOS
self._counter = count() # for correct ordering of nodes with same score
def add(self, score, node):
""" Adds a new beam search node to the queue of current nodes """
self.nodes.put((score, next(self._counter), node))
def add_final(self, score, node):
""" Adds a beam search path that ended in EOS (= finished sentence) """
# ensure all node paths have the same length for batch ops
missing = self.max_len - node.length
node.sequence = torch.cat((node.sequence.cpu(), torch.tensor([self.pad]*missing).long()))
self.final.put((score, next(self._counter), node))
def get_current_beams(self):
""" Returns beam_size current nodes with the lowest negative log probability """
nodes = []
while not self.nodes.empty() and len(nodes) < self.beam_size:
node = self.nodes.get()
nodes.append((node[0], node[2]))
return nodes
def get_best(self):
""" Returns final node with the lowest negative log probability """
# Merge EOS paths and those that were stopped by
# max sequence length (still in nodes)
merged = PriorityQueue()
for _ in range(self.final.qsize()):
node = self.final.get()
merged.put(node)
for _ in range(self.nodes.qsize()):
node = self.nodes.get()
merged.put(node)
node = merged.get()
node = (node[0], node[2])
return node
# For task 4 diversity promoting beam search.
# To output the n-best lists
def get_top_n(self,beam_size):
""" Returns final node with the lowest negative log probability """
# Merge EOS paths and those that were stopped by
# max sequence length (still in nodes)
merged = PriorityQueue()
nodes = []
for _ in range(self.final.qsize()):
node = self.final.get()
merged.put(node)
for _ in range(self.nodes.qsize()):
node = self.nodes.get()
merged.put(node)
for i in range(merged.qsize()):
node = merged.get()
nodes.append([node[0], node[2]])
return nodes
def prune(self):
""" Removes all nodes but the beam_size best ones (lowest neg log prob) """
nodes = PriorityQueue()
# Keep track of how many search paths are already finished (EOS)
finished = self.final.qsize()
for _ in range(self.beam_size-finished):
node = self.nodes.get()
nodes.put(node)
self.nodes = nodes
class BeamSearchNode(object):
""" Defines a search node and stores values important for computation of beam search path"""
def __init__(self, search, emb, lstm_out, final_hidden, final_cell, mask, sequence, logProb, length):
# Attributes needed for computation of decoder states
self.sequence = sequence
self.emb = emb
self.lstm_out = lstm_out
self.final_hidden = final_hidden
self.final_cell = final_cell
self.mask = mask
# Attributes needed for computation of sequence score
self.logp = logProb
self.length = length
self.search = search
def eval(self):
""" Returns score of sequence up to this node """
return self.logp
| 33.8125
| 105
| 0.611038
|
4a089a1f1cc6c1ffad63fc09d9bfbca37bf8aaa7
| 1,327
|
py
|
Python
|
setup.py
|
vincent-ferotin/sphinxcontrib-secualert
|
19b28a606dc98ff03c678c902879f263ebe70323
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
vincent-ferotin/sphinxcontrib-secualert
|
19b28a606dc98ff03c678c902879f263ebe70323
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
vincent-ferotin/sphinxcontrib-secualert
|
19b28a606dc98ff03c678c902879f263ebe70323
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import (
setup,
find_packages,
)
LONG_DESC = '''
This package contains the `secualert` Sphinx extension.
This extension was created to specifically list as security alerts
items previously targeted as `todo`, and allow listing all alerts
in one list, different from `todolist`.
'''
REQUIRES = [
'Sphinx>=1.8',
]
setup(
name='sphinxcontrib-secualert',
version='0.1',
license='BSD 2-Clause License',
author='Vincent Férotin',
author_email='vincent.ferotin@gmail.com',
description='Sphinx "secualert" extension',
long_description=LONG_DESC,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Sphinx :: Extension',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=REQUIRES,
extras_require={
'dev': [
'Babel',
],
},
namespace_packages=['sphinxcontrib'],
)
| 25.037736
| 66
| 0.629239
|
4a089b4275f13a12352cea059ebba483669b3c9d
| 917
|
py
|
Python
|
tests/selenium/test_sizing_e.py
|
adament/dash-table
|
878f02cada45ff76d32d4b712f5ef3c23447fa52
|
[
"MIT"
] | null | null | null |
tests/selenium/test_sizing_e.py
|
adament/dash-table
|
878f02cada45ff76d32d4b712f5ef3c23447fa52
|
[
"MIT"
] | null | null | null |
tests/selenium/test_sizing_e.py
|
adament/dash-table
|
878f02cada45ff76d32d4b712f5ef3c23447fa52
|
[
"MIT"
] | null | null | null |
import pytest
from test_sizing import szng003_on_prop_change_impl
@pytest.mark.parametrize(
"fixed_columns",
[
# dict(),
dict(fixed_columns=dict(headers=True)),
dict(fixed_columns=dict(headers=True, data=1)),
],
)
@pytest.mark.parametrize(
"fixed_rows",
[
# dict(),
dict(fixed_rows=dict(headers=True)),
dict(fixed_rows=dict(headers=True, data=1)),
],
)
@pytest.mark.parametrize(
"merge_duplicate_headers",
[dict(merge_duplicate_headers=True), dict(merge_duplicate_headers=False)],
)
@pytest.mark.parametrize(
"callback_props", [dict(style_table=dict(width=500, minWidth=500, maxWidth=500)),],
)
def test_szng003_e_on_prop_change(
test, fixed_columns, fixed_rows, merge_duplicate_headers, callback_props
):
szng003_on_prop_change_impl(
test, fixed_columns, fixed_rows, merge_duplicate_headers, callback_props
)
| 26.2
| 87
| 0.7012
|
4a089c06bf9d99ad431df9ab9897f697eefd7fcb
| 7,385
|
py
|
Python
|
research/adversarial_text/graphs_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/adversarial_text/graphs_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/adversarial_text/graphs_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import operator
import os
import random
import shutil
import string
import tempfile
# Dependency imports
import tensorflow as tf
import graphs
from data import data_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
data = data_utils
flags.DEFINE_integer('task', 0, 'Task id; needed for SyncReplicas test')
def _build_random_vocabulary(vocab_size=100):
"""Builds and returns a dict<term, id>."""
vocab = set()
while len(vocab) < (vocab_size - 1):
rand_word = ''.join(
random.choice(string.ascii_lowercase)
for _ in range(random.randint(1, 10)))
vocab.add(rand_word)
vocab_ids = dict([(word, i) for i, word in enumerate(vocab)])
vocab_ids[data.EOS_TOKEN] = vocab_size - 1
return vocab_ids
def _build_random_sequence(vocab_ids):
seq_len = random.randint(10, 200)
ids = vocab_ids.values()
seq = data.SequenceWrapper()
for token_id in [random.choice(ids) for _ in range(seq_len)]:
seq.add_timestep().set_token(token_id)
return seq
def _build_vocab_frequencies(seqs, vocab_ids):
vocab_freqs = defaultdict(int)
ids_to_words = dict([(i, word) for word, i in vocab_ids.iteritems()])
for seq in seqs:
for timestep in seq:
vocab_freqs[ids_to_words[timestep.token]] += 1
vocab_freqs[data.EOS_TOKEN] = 0
return vocab_freqs
class GraphsTest(tf.test.TestCase):
"""Test graph construction methods."""
@classmethod
def setUpClass(cls):
# Make model small
FLAGS.batch_size = 2
FLAGS.num_timesteps = 3
FLAGS.embedding_dims = 4
FLAGS.rnn_num_layers = 2
FLAGS.rnn_cell_size = 4
FLAGS.cl_num_layers = 2
FLAGS.cl_hidden_size = 4
FLAGS.vocab_size = 10
# Set input/output flags
FLAGS.data_dir = tempfile.mkdtemp()
# Build and write sequence files.
vocab_ids = _build_random_vocabulary(FLAGS.vocab_size)
seqs = [_build_random_sequence(vocab_ids) for _ in range(5)]
seqs_label = [
data.build_labeled_sequence(seq, random.choice([True, False]))
for seq in seqs
]
seqs_lm = [data.build_lm_sequence(seq) for seq in seqs]
seqs_ae = [data.build_seq_ae_sequence(seq) for seq in seqs]
seqs_rev = [data.build_reverse_sequence(seq) for seq in seqs]
seqs_bidir = [
data.build_bidirectional_seq(seq, rev)
for seq, rev in zip(seqs, seqs_rev)
]
seqs_bidir_label = [
data.build_labeled_sequence(bd_seq, random.choice([True, False]))
for bd_seq in seqs_bidir
]
filenames = [
data.TRAIN_CLASS, data.TRAIN_LM, data.TRAIN_SA, data.TEST_CLASS,
data.TRAIN_REV_LM, data.TRAIN_BD_CLASS, data.TEST_BD_CLASS
]
seq_lists = [
seqs_label, seqs_lm, seqs_ae, seqs_label, seqs_rev, seqs_bidir,
seqs_bidir_label
]
for fname, seq_list in zip(filenames, seq_lists):
with tf.python_io.TFRecordWriter(
os.path.join(FLAGS.data_dir, fname)) as writer:
for seq in seq_list:
writer.write(seq.seq.SerializeToString())
# Write vocab.txt and vocab_freq.txt
vocab_freqs = _build_vocab_frequencies(seqs, vocab_ids)
ordered_vocab_freqs = sorted(
vocab_freqs.items(), key=operator.itemgetter(1), reverse=True)
with open(os.path.join(FLAGS.data_dir, 'vocab.txt'), 'w') as vocab_f:
with open(os.path.join(FLAGS.data_dir, 'vocab_freq.txt'), 'w') as freq_f:
for word, freq in ordered_vocab_freqs:
vocab_f.write('{}\n'.format(word))
freq_f.write('{}\n'.format(freq))
@classmethod
def tearDownClass(cls):
shutil.rmtree(FLAGS.data_dir)
def setUp(self):
# Reset FLAGS
FLAGS.rnn_num_layers = 1
FLAGS.sync_replicas = False
FLAGS.adv_training_method = None
FLAGS.num_candidate_samples = -1
FLAGS.num_classes = 2
FLAGS.use_seq2seq_autoencoder = False
# Reset Graph
tf.reset_default_graph()
def testClassifierGraph(self):
FLAGS.rnn_num_layers = 2
model = graphs.VatxtModel()
train_op, _, _ = model.classifier_training()
# Pretrained vars: embedding + LSTM layers
self.assertEqual(
len(model.pretrained_variables), 1 + 2 * FLAGS.rnn_num_layers)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testLanguageModelGraph(self):
train_op, _, _ = graphs.VatxtModel().language_model_training()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testMulticlass(self):
FLAGS.num_classes = 10
graphs.VatxtModel().classifier_graph()
def testATMethods(self):
at_methods = [None, 'rp', 'at', 'vat', 'atvat']
for method in at_methods:
FLAGS.adv_training_method = method
with tf.Graph().as_default():
graphs.VatxtModel().classifier_graph()
# Ensure variables have been reused
# Embedding + LSTM layers + hidden layers + logits layer
expected_num_vars = 1 + 2 * FLAGS.rnn_num_layers + 2 * (
FLAGS.cl_num_layers) + 2
self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
def testSyncReplicas(self):
FLAGS.sync_replicas = True
graphs.VatxtModel().language_model_training()
def testCandidateSampling(self):
FLAGS.num_candidate_samples = 10
graphs.VatxtModel().language_model_training()
def testSeqAE(self):
FLAGS.use_seq2seq_autoencoder = True
graphs.VatxtModel().language_model_training()
def testBidirLM(self):
graphs.VatxtBidirModel().language_model_graph()
def testBidirClassifier(self):
at_methods = [None, 'rp', 'at', 'vat', 'atvat']
for method in at_methods:
FLAGS.adv_training_method = method
with tf.Graph().as_default():
graphs.VatxtBidirModel().classifier_graph()
# Ensure variables have been reused
# Embedding + 2 LSTM layers + hidden layers + logits layer
expected_num_vars = 1 + 2 * 2 * FLAGS.rnn_num_layers + 2 * (
FLAGS.cl_num_layers) + 2
self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
def testEvalGraph(self):
_, _ = graphs.VatxtModel().eval_graph()
def testBidirEvalGraph(self):
_, _ = graphs.VatxtBidirModel().eval_graph()
if __name__ == '__main__':
tf.test.main()
| 32.676991
| 81
| 0.670142
|
4a089c1ef87cafdaf188778f284525a1a7e2dbe7
| 522
|
py
|
Python
|
tests/test_hg.py
|
dvershinin/whatversion
|
72341917136c35cde24fa12c92c9616abc65e7f3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_hg.py
|
dvershinin/whatversion
|
72341917136c35cde24fa12c92c9616abc65e7f3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_hg.py
|
dvershinin/whatversion
|
72341917136c35cde24fa12c92c9616abc65e7f3
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from packaging import version
from lastversion.lastversion import latest
# change dir to tests directory to make relative paths possible
os.chdir(os.path.dirname(os.path.realpath(__file__)))
def test_merc_1():
"""Test a Mercurial repo."""
repo = 'https://hg.dillo.org/dillo/'
v = latest(repo)
assert v == version.parse('3.0.5')
def test_hg_nginx():
"""Test NGINX."""
repo = "https://nginx.org/"
output = latest(repo, 'version')
assert output >= version.parse("1.18.0")
| 19.333333
| 63
| 0.666667
|
4a089c7c9a1729722edfb6f366edc991bad29e93
| 3,983
|
py
|
Python
|
tests/bls/test_expand_message_xmd.py
|
kclowes/py_ecc
|
832f3901b9987294698d3b1a6448133da99daf60
|
[
"MIT"
] | 122
|
2017-07-15T14:17:43.000Z
|
2022-03-15T13:26:45.000Z
|
tests/bls/test_expand_message_xmd.py
|
kclowes/py_ecc
|
832f3901b9987294698d3b1a6448133da99daf60
|
[
"MIT"
] | 97
|
2017-07-17T16:01:53.000Z
|
2022-01-06T23:27:40.000Z
|
tests/bls/test_expand_message_xmd.py
|
kclowes/py_ecc
|
832f3901b9987294698d3b1a6448133da99daf60
|
[
"MIT"
] | 75
|
2017-07-17T21:04:26.000Z
|
2022-03-03T10:22:43.000Z
|
from hashlib import sha256
import pytest
from py_ecc.bls.hash import expand_message_xmd
# The test vectors from
# https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#appendix-I.1
DST = b'QUUX-V01-CS02-with-expander'
@pytest.mark.parametrize(
'msg, len_in_bytes, uniform_bytes',
[
(b'', 0x20, bytes.fromhex('f659819a6473c1835b25ea59e3d38914c98b374f0970b7e4c92181df928fca88')),
(b'abc', 0x20, bytes.fromhex('1c38f7c211ef233367b2420d04798fa4698080a8901021a795a1151775fe4da7')),
(b'abcdef0123456789', 0x20, bytes.fromhex('8f7e7b66791f0da0dbb5ec7c22ec637f79758c0a48170bfb7c4611bd304ece89')), # noqa: E501
(b'q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq', 0x20, bytes.fromhex('72d5aa5ec810370d1f0013c0df2f1d65699494ee2a39f72e1716b1b964e1c642')), # noqa: E501
(b'a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 0x20, bytes.fromhex('3b8e704fc48336aca4c2a12195b720882f2162a4b7b13a9c350db46f429b771b')), # noqa: E501
(b'', 0x80, bytes.fromhex('8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f89580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c760861c0cde2005afc2c114042ee7b5848f5303f0611cf297f')), # noqa: E501
(b'abc', 0x80, bytes.fromhex('fe994ec51bdaa821598047b3121c149b364b178606d5e72bfbb713933acc29c186f316baecf7ea22212f2496ef3f785a27e84a40d8b299cec56032763eceeff4c61bd1fe65ed81decafff4a31d0198619c0aa0c6c51fca15520789925e813dcfd318b542f8799441271f4db9ee3b8092a7a2e8d5b75b73e28fb1ab6b4573c192')), # noqa: E501
(b'abcdef0123456789', 0x80, bytes.fromhex('c9ec7941811b1e19ce98e21db28d22259354d4d0643e301175e2f474e030d32694e9dd5520dde93f3600d8edad94e5c364903088a7228cc9eff685d7eaac50d5a5a8229d083b51de4ccc3733917f4b9535a819b445814890b7029b5de805bf62b33a4dc7e24acdf2c924e9fe50d55a6b832c8c84c7f82474b34e48c6d43867be')), # noqa: E501
(b'q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq', 0x80, bytes.fromhex('48e256ddba722053ba462b2b93351fc966026e6d6db493189798181c5f3feea377b5a6f1d8368d7453faef715f9aecb078cd402cbd548c0e179c4ed1e4c7e5b048e0a39d31817b5b24f50db58bb3720fe96ba53db947842120a068816ac05c159bb5266c63658b4f000cbf87b1209a225def8ef1dca917bcda79a1e42acd8069')), # noqa: E501
(b'a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 0x80, bytes.fromhex('396962db47f749ec3b5042ce2452b619607f27fd3939ece2746a7614fb83a1d097f554df3927b084e55de92c7871430d6b95c2a13896d8a33bc48587b1f66d21b128a1a8240d5b0c26dfe795a1a842a0807bb148b77c2ef82ed4b6c9f7fcb732e7f94466c8b51e52bf378fba044a31f5cb44583a892f5969dcd73b3fa128816e')), # noqa: E501
]
)
def test_expand_message_xmd_sha256(msg, len_in_bytes, uniform_bytes):
assert expand_message_xmd(
msg=msg,
DST=DST,
len_in_bytes=len_in_bytes,
hash_function=sha256,
) == uniform_bytes
| 117.147059
| 826
| 0.89179
|
4a089ce7ce5000e4bc5ef9e3d908ad742500e9fe
| 43,497
|
py
|
Python
|
app/case/views.py
|
Allen-lang/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | 1
|
2019-12-31T01:53:04.000Z
|
2019-12-31T01:53:04.000Z
|
app/case/views.py
|
fuyang123/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | null | null | null |
app/case/views.py
|
fuyang123/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | null | null | null |
"""
@author: lileilei
@file: view.py
@time: 2018/1/31 13:20
"""
from flask import redirect, request, render_template, \
session, url_for, flash, jsonify, Blueprint, make_response, send_from_directory
from app.models import *
from app.form import *
from config import Dingtalk_access_token
import os, time, datetime, json
from common.pares_excel_inter import paser_interface_case
from common.py_html import createHtml
from common.requ_case import Api
from common.panduan import assert_in, pare_result_mysql
from app.test_case.Test_case import ApiTestCase
from common.send_email import send_emails
from flask.views import View, MethodView
from flask_login import current_user, login_required
from common.Dingtalk import send_ding
from common.mysqldatabasecur import *
from config import Config_daoru_xianzhi, redis_host, \
redis_port, redis_save_result_db, save_duration
from common.excet_excel import create_interface_case
from common.hebinglist import listmax
from common.pyredis import ConRedisOper
case = Blueprint('case', __name__)
def save_reslut(key, value):
m = ConRedisOper(host=redis_host, port=redis_port, db=redis_save_result_db)
m.sethase(key, value, save_duration)
def get_reslut(key):
m = ConRedisOper(host=redis_host, port=redis_port, db=redis_save_result_db)
reslit = m.getset(key)
return reslit
def get_pro_mo():
projects = Project.query.filter_by(status=False).all()
model = Model.query.filter_by(status=False).all()
return projects, model
class AddtestcaseView(View):
methods = ['GET', 'POST']
@login_required
def dispatch_request(self):
form = Interface_yong_Form()
project, models = get_pro_mo()
inrterface_list = Interface.query.filter_by(status=False).all()
mock_yilai = Mockserver.query.filter_by(delete=False).all()
if current_user.is_sper == True:
projects = Project.query.filter_by(status=False).order_by('-id').all()
else:
projects = []
id = []
for i in current_user.quanxians:
if (i.projects in id) == False:
if i.projects.status == False:
projects.append(i.projects)
id.append(i.projects)
if request.method == 'POST' and form.validate_on_submit:
ci = request.form.get("ci")
if ci == "是":
is_ci = True
else:
is_ci = False
save = request.form.get('save')
yongli_nam = request.form.get('project')
mode = request.form.get('mode')
interface_name = request.form.get('interface_name')
interface_url = request.form.get('interface_url')
interface_header = request.form.get('interface_headers')
interface_meth = request.form.get('interface_meth')
interface_can = request.form.get('interface_can')
interface_re = request.form.get('interface_rest')
yilai_data = request.values.get("yilaicanshu")
yilai_test = request.values.get("jiekou")
shifoujiaoyan = request.values.get("database")
interface_type = request.values.get('interface_type')
if shifoujiaoyan == 'on':
databasesql = request.values.get('databasesql')
databijiao = request.values.get('databijiao')
is_database = True
else:
databasesql = None
databijiao = None
is_database = False
if yilai_test is None or yilai_test == '请选择依赖接口':
yilai_dat = None
yilai_tes = None
else:
yilai_tes = yilai_test
if yilai_data is None or yilai_data == '':
flash(u'选择依赖后必须填写获取依赖的接口的字段')
return render_template('add/add_test_case.html', form=form, projects=projects, models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
yilai_dat = yilai_data
if yongli_nam == '' or mode == '' or interface_header == '' or interface_url == '' or interface_meth == '':
flash(u'请准确填写用例的各项信息')
return render_template('add/add_test_case.html', form=form, projects=projects, models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
project_id = Project.query.filter_by(project_name=yongli_nam).first().id
models_id = Model.query.filter_by(model_name=mode).first().id
interface = Interface.query.filter_by(Interface_name=interface_name).first().id
if save == 1 or save == '1':
saves = False
elif save == 2 or save == '2':
saves = True
else:
flash(u'选择保存测试结果出现异常')
return render_template('add/add_test_case.html', form=form, projects=projects, mock_yilai=mock_yilai,
models=models, inrterface_list=inrterface_list)
try:
newcase = InterfaceTest(projects_id=project_id, model_id=models_id, interface_id=interface,
Interface_headers=interface_header, bian_num=interface_url,
Interface_meth=interface_meth, Interface_pase=interface_can,
Interface_assert=interface_re, Interface_user_id=current_user.id,
saveresult=saves, pid=(yilai_tes), getattr_p=yilai_dat,
is_database=is_database, chaxunshujuku=databasesql,
databaseziduan=databijiao,
Interface_name=interface_name, Interface_url=interface_url,
interface_type=interface_type,is_ci=is_ci)
db.session.add(newcase)
db.session.commit()
try:
for key, value in dict(eval(interface_can)):
if str(value).startswith("#"):
if str(value).split(".")[0] == '#action':
action = Action.query.filter_by(name=str(value).split(".")[1]).first()
if not action:
flash(u'操作不存在')
return render_template('add/add_test_case.html', form=form, projects=projects,
models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
caseac = CaseAction(case=newcase, action=action, actiontype=action.category,
filed=key)
db.session.add(caseac)
db.session.commit()
elif str(value).split(".")[0] == '#conf':
action = GeneralConfiguration.query.filter_by(name=str(value).split(".")[1]).first()
if not action:
flash(u'配置不存在')
return render_template('add/add_test_case.html', form=form, projects=projects,
models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
caseac = CaseGeneral(case=newcase, general=action, filed=key)
db.session.add(caseac)
db.session.commit()
else:
pass
except:
flash(u'测试用例参数仅支持dict格式')
return render_template('add/add_test_case.html', form=form, projects=projects, models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
flash(u'添加用例成功')
return redirect(url_for('home.yongli'))
except Exception as e:
db.session.rollback()
flash(u'添加用例失败,原因是:%s' % e)
return redirect(url_for('home.yongli'))
return render_template('add/add_test_case.html', form=form, projects=projects, models=models,
inrterface_list=inrterface_list, mock_yilai=mock_yilai)
class EditcaseView(View):
methods = ['GET', 'POST']
@login_required
def dispatch_request(self, id):
project, models = get_pro_mo()
inrterface_list = Interface.query.filter_by(status=False).all()
mock_yilai = Mockserver.query.filter_by(delete=False).all()
if current_user.is_sper == True:
projects = Project.query.filter_by(status=False).order_by('-id').all()
else:
projects = []
id = []
for i in current_user.quanxians:
if (i.projects in id) == False:
if i.projects.status == False:
projects.append(i.projects)
id.append(i.projects)
edit_case = InterfaceTest.query.filter_by(id=id, status=False).first()
if not edit_case:
flash(u'编辑用例不存在!或者已经删除')
return redirect(url_for('home.yongli'))
if request.method == 'POST':
save = request.form.get('save')
yongli_nam = request.form.get('project')
mode = request.form.get('model')
url = request.form.get('url')
meth = request.form.get('meth')
headers = request.form.get('headers')
parme = request.form.get('parme')
reque = request.form.get('reque')
ci = request.form.get("ci")
yilai_data = request.values.get("yilaicanshu")
yilai_test = request.values.get("jiekou")
inerfa = request.form.get('inerfa')
shifoujiaoyan = request.values.get("database")
interface_type = request.values.get('interface_type')
if ci == "是":
is_ci = True
else:
is_ci = False
if shifoujiaoyan == 'on':
databasesql = request.values.get('databasesql')
databijiao = request.values.get('databijiao')
is_database = True
else:
databasesql = None
databijiao = None
is_database = False
if yilai_test is None or yilai_test == '请选择依赖接口' or yilai_test == '':
yilai_dat = None
yilai_tes = None
else:
yilai_tes = yilai_test
if yilai_data is None or yilai_data == '':
flash(u'选择依赖后必须填写获取依赖的接口的字段')
return render_template('edit/edit_case.html', edit=edit_case,
projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
yilai_dat = yilai_data
if yongli_nam == None or mode == None or url == '' or headers == '' or meth == '' or reque == '':
flash(u'请确定各项参数都正常填写')
return render_template('edit/edit_case.html', edit=edit_case, projects=projects,
models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
projects_id = Project.query.filter_by(id=(yongli_nam)).first().id
model_id = Model.query.filter_by(model_name=mode).first().id
interface = Interface.query.filter_by(Interface_name=inerfa).first().id
if save is None:
saves = False
elif save == '是':
saves = True
else:
flash(u'选择保存测试用例异常')
return render_template('edit/edit_case.html',
edit=edit_case, projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
edit_case.projects_id = projects_id
edit_case.model_id = model_id
edit_case.interface_id = interface
edit_case.bianhao = url
edit_case.Interface_headers = headers
edit_case.Interface_meth = meth
edit_case.Interface_pase = parme
edit_case.Interface_assert = reque
edit_case.Interface_user_id = current_user.id
edit_case.saveresult = saves
edit_case.pid = yilai_tes
edit_case.getattr_p = yilai_dat
edit_case.is_database = is_database
edit_case.chaxunshujuku = databasesql
edit_case.databaseziduan = databijiao
edit_case.interface_type = interface_type
edit_case.is_ci = is_ci
db.session.commit()
try:
actioncase = CaseAction.query.filter_by(case=edit_case.id).all()
configcase = CaseGeneral.query.filter_by(case=edit_case.id).all()
for i in actioncase:
db.session.delete(i)
for m in configcase:
db.session.delete(m)
db.session.commit()
try:
for key, value in dict(eval(parme)):
if str(value).startswith("#"):
if str(value).split(".")[0] == '#action':
action = Action.query.filter_by(name=str(value).split(".")[1]).first()
if not action:
flash(u'操作不存在')
return render_template('edit/edit_case.html', edit=edit_case,
projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
caseac = CaseAction(case=edit_case, action=action, actiontype=action.category,
filed=key)
db.session.add(caseac)
db.session.commit()
elif str(value).split(".")[0] == '#conf':
action = GeneralConfiguration.query.filter_by(name=str(value).split(".")[1]).first()
if not action:
flash(u'配置不存在')
return render_template('edit/edit_case.html', edit=edit_case,
projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
caseac = CaseGeneral(case=edit_case, general=action, filed=key)
db.session.add(caseac)
db.session.commit()
else:
pass
except:
flash(u'测试用例参数仅支持dict格式')
return render_template('edit/edit_case.html', edit=edit_case,
projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
db.session.commit()
flash(u'用例:%s编辑成功' % id)
return redirect(url_for('home.yongli'))
except Exception as e:
print(e)
db.session.rollback()
flash(u'用例:%s 编辑失败,请重新编辑!' % id)
return render_template('edit/edit_case.html',
edit=edit_case, projects=projects, models=models,
inerfacelist=inrterface_list, mock_yilai=mock_yilai)
return render_template('edit/edit_case.html', edit=edit_case, projects=projects,
models=models, inerfacelist=inrterface_list, mock_yilai=mock_yilai)
class SeryongliView(MethodView):
    """AJAX endpoint: list a project's interface test cases and its test environments.

    Expects a JSON body like ``{"project": <name>, "interface_type": "http"|"dubbo"}``
    and answers ``{'code': 200, 'data': [...], 'url': [...], 'typeinter': ...}``.
    """

    @login_required
    def post(self):
        id = request.get_data('id')
        project = json.loads(id.decode('utf-8'))
        if not project:
            return jsonify({'msg': '没有发送数据', 'code': 39})
        project_name = str(project['project'])
        project_is = Project.query.filter_by(project_name=project_name, status=False).first()
        # Bug fix: the original dereferenced ``project_is`` (in the Interfacehuan
        # query and ``project_is.status``) before checking it existed, raising
        # AttributeError for an unknown/deleted project. Guard first.
        if project_is is None or project_is.status is True:
            return jsonify({'msg': '项目已经删除', 'code': 40})
        testevent = Interfacehuan.query.filter_by(projects=project_is, status=False).all()
        interfatype = project['interface_type']
        # Normalise the protocol name; anything unknown is reported as 'none'.
        typeinterface = interfatype if interfatype in ('http', 'dubbo') else 'none'
        intertestcases = InterfaceTest.query.filter_by(
            projects_id=project_is.id, status=False,
            interface_type=str(interfatype)).order_by('-id').all()
        testeventlist = [{'url': testeven.url, 'id': testeven.id} for testeven in testevent]
        interfacelist = [{'id': interface.id,
                          'model': interface.models.model_name,
                          "project": interface.projects.project_name,
                          'bianhao': interface.bian_num,
                          'interface': interface.interfaces.Interface_name,
                          'Interface_name': interface.Interface_name,
                          'Interface_headers': interface.Interface_headers,
                          'Interface_url': interface.Interface_url,
                          'Interface_meth': interface.Interface_meth,
                          'Interface_pase': interface.Interface_pase,
                          'Interface_assert': interface.Interface_assert,
                          'Interface_is_tiaoshi': interface.Interface_is_tiaoshi,
                          'Interface_tiaoshi_shifou': interface.Interface_tiaoshi_shifou}
                         for interface in intertestcases]
        return jsonify(({'msg': '成功', 'code': 200, 'data': interfacelist,
                         'url': testeventlist, 'typeinter': typeinterface}))
class DaorucaseView(View):
    """Bulk-import interface test cases from an uploaded ``.xlsx`` workbook."""

    methods = ['GET', 'POST']

    @login_required
    def dispatch_request(self):
        if request.method == 'POST':
            file = request.files['myfile']
            # Bug fix: use the *last* dot-separated part (and compare
            # case-insensitively) so names such as "cases.v2.xlsx" or
            # "CASES.XLSX" are accepted; the original only looked at index 1.
            if file and '.' in file.filename and file.filename.rsplit('.', 1)[-1].lower() == 'xlsx':
                # NOTE(review): fixed filename in the working directory —
                # concurrent uploads clobber each other. Verify acceptable.
                filename = 'jiekoucase.xlsx'
                file.save(filename)
                jiekou_bianhao, interface_name, project_nam, model_nam, interface_url, interfac_header, \
                    interface_meth, interface_par, interface_bas, interface_type, is_save_result, yilai_is, \
                    yilai, yilai_ziduan, is_cha_data, data_sql, paser_base = paser_interface_case(filename)
                if len(yilai) > Config_daoru_xianzhi:
                    flash(u'一次导入超过了系统的上限')
                    return redirect(url_for('home.daoru_case'))
                try:
                    for i in range(len(jiekou_bianhao)):
                        projects_id = Project.query.filter_by(project_name=str(project_nam[i])).first()
                        model_id = Model.query.filter_by(model_name=str(model_nam[i])).first()
                        if projects_id is None or model_id is None:
                            flash(u'导入失败,项目或者模块不存在')
                            return redirect(url_for('home.daoru_case'))
                        # '是' (yes) enables the flag; anything else counts as no.
                        save_reslt = is_save_result[i] == '是'
                        chaxun = is_cha_data[i] == '是'
                        if yilai_is[i] == '是':
                            yilai_case = yilai[i]
                            ziduan_case = yilai_ziduan[i]
                        else:
                            yilai_case = None
                            ziduan_case = None
                        new_interface = InterfaceTest(projects_id=projects_id.id,
                                                      model_id=model_id.id,
                                                      Interface_name=str(interface_name[i]),
                                                      Interface_url=str(interface_url[i]),
                                                      Interface_headers=interfac_header[i],
                                                      Interface_meth=str(interface_meth[i]),
                                                      interface_type=str(interface_type[i]),
                                                      Interface_pase=(interface_par[i]),
                                                      Interface_assert=str(interface_bas[i]),
                                                      saveresult=save_reslt,
                                                      is_database=chaxun,
                                                      chaxunshujuku=data_sql[i],
                                                      databaseziduan=paser_base[i],
                                                      pid=yilai_case,
                                                      getattr_p=ziduan_case,
                                                      Interface_user_id=User.query.filter_by(
                                                          username=session.get('username')).first().id)
                        db.session.add(new_interface)
                    # Bug fix: commit once after the loop so the rollback in the
                    # except handler actually makes the import all-or-nothing;
                    # per-row commits left partial data behind on failure.
                    db.session.commit()
                    flash(u'导入成功')
                    return redirect(url_for('home.yongli'))
                except Exception as e:
                    db.session.rollback()
                    flash(u'导入失败,原因:%s' % e)
                    return render_template('daoru_case.html')
            flash(u'导入失败')
            return render_template('daoru_case.html')
        return render_template('daoru_case.html')
class DuoyongliView(View):
    """Run a batch of selected interface test cases, generate an HTML report/log,
    persist a TestResult row, and optionally notify by email or DingTalk.
    """

    methods = ['GET', 'POST']

    @login_required
    def dispatch_request(self):
        # Referer is used so validation errors bounce back to the calling page.
        next = request.headers.get('Referer')
        starttime = datetime.datetime.now()
        star = time.time()
        # Timestamp doubles as the report/log file basename.
        day = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
        pad = os.getcwd()
        file_dir = pad + '/app/upload'
        file = os.path.join(file_dir, (day + '.log'))
        if os.path.exists(file) is False:
            os.system('touch %s' % file)
        filepath = os.path.join(file_dir, (day + '.html'))
        if os.path.exists(filepath) is False:
            os.system(r'touch %s' % filepath)
        if request.method == 'POST':
            # 'dingding' selects the notification channel: 'email' / 'dingding' / other.
            f_dingding = request.form.get('dingding')
            me = request.form.getlist('yongli')
            testurl = request.form.get('urltest')
            if len(me) <= 1:
                flash(u'请选择一个以上的用例来执行')
                # NOTE(review): endpoint 'yongli' lacks the 'home.' blueprint
                # prefix used everywhere else — confirm it resolves.
                return redirect(next or url_for('yongli'))
            if testurl is None:
                flash(u'请选择测试环境')
                return redirect(next or url_for('yongli'))
            # Parallel per-case lists fed to ApiTestCase below (same index = same case).
            projecct_list = []
            model_list = []
            Interface_name_list = []
            Interface_url_list = []
            Interface_meth_list = []
            Interface_pase_list = []
            Interface_assert_list = []
            Interface_headers_list = []
            Interface_pid_list = []
            Interface_yilai_list = []
            Interface_save_list = []
            Interface_is_data_list = []
            Interface_mysql_list = []
            Interface_msyql_ziduan_list = []
            id_list = []
            for case in me:
                case_one = InterfaceTest.query.filter_by(id=case).first()
                Interface_is_data_list.append(case_one.is_database)
                Interface_mysql_list.append(case_one.chaxunshujuku)
                Interface_msyql_ziduan_list.append(case_one.databaseziduan)
                id_list.append(case_one.id)
                projecct_list.append(case_one.projects)
                model_list.append(case_one.models)
                Interface_url_list.append(case_one.interfaces.Interface_url)
                Interface_name_list.append(case_one.Interface_name)
                Interface_meth_list.append(case_one.Interface_meth)
                Interface_pase_list.append(case_one.Interface_pase)
                Interface_assert_list.append(case_one.Interface_assert)
                Interface_headers_list.append(case_one.Interface_headers)
                Interface_pid_list.append(case_one.pid)
                Interface_yilai_list.append(case_one.getattr_p)
                Interface_save_list.append(case_one.saveresult)
            # A single run may only span one project.
            if (len(set(projecct_list))) > 1:
                flash('目前单次只能执行一个项目')
                return redirect(next or url_for('duoyongli'))
            testevent = Interfacehuan.query.filter_by(url=testurl).first()
            try:
                apitest = ApiTestCase(inteface_url=Interface_url_list, inteface_meth=Interface_meth_list,
                                      inteface_parm=Interface_pase_list, inteface_assert=Interface_assert_list,
                                      file=file, headers=Interface_headers_list, pid=Interface_pid_list,
                                      yilaidata=Interface_yilai_list, saveresult=Interface_save_list,
                                      id_list=id_list, is_database=Interface_is_data_list,
                                      data_mysql=Interface_mysql_list,
                                      data_ziduan=Interface_msyql_ziduan_list, urltest=testevent)
                result_toal, result_pass, result_fail, relusts, bask_list, result_cashu, \
                    result_wei, result_except, spend_list = apitest.testapi()
                # Max / min / mean of per-case latency for the report header.
                large, minx, pinglun = listmax(list2=spend_list)
                endtime = datetime.datetime.now()
                end = time.time()
                createHtml(titles=u'接口测试报告', filepath=filepath, starttime=starttime, endtime=endtime,
                           passge=result_pass, fail=result_fail, id=id_list, name=projecct_list,
                           headers=Interface_headers_list, coneent=Interface_url_list, url=Interface_meth_list,
                           meth=Interface_pase_list, yuqi=Interface_assert_list, json=bask_list, relusts=relusts,
                           excepts=result_except, yuqis=result_cashu, weizhi=result_wei, maxs=large, mins=minx,
                           pingluns=pinglun)
                hour = end - star
                user_id = current_user.id
                # Persist the summary row pointing at the report/log files above.
                new_reust = TestResult(Test_user_id=user_id, test_num=result_toal, pass_num=result_pass,
                                       fail_num=result_fail, test_time=starttime, hour_time=hour,
                                       test_rep=(day + '.html'), test_log=(day + '.log'),
                                       Exception_num=result_except, can_num=result_cashu,
                                       wei_num=result_wei, projects_id=projecct_list[0].id)
                db.session.add(new_reust)
                db.session.commit()
                if f_dingding == 'email':
                    # Use the caller's default email configuration, if any.
                    email = EmailReport.query.filter_by(email_re_user_id=int(current_user.id),
                                                        default_set=True).first()
                    if email:
                        m = send_emails(sender=email.send_email, receivers=email.to_email,
                                        password=email.send_email_password,
                                        smtp=email.stmp_email, port=email.port, fujian1=file, fujian2=filepath,
                                        subject=u'%s用例执行测试报告' % day, url='http://127.0.0.1:5000/test_rep')
                        # NOTE(review): hard-coded localhost URL in the mail body
                        # — confirm intended outside development.
                        if m == False:
                            flash(u'发送邮件失败,请检查您默认的邮件设置是否正确')
                            return redirect(url_for('home.test_rep'))
                        flash(u'测试已经完成,并且给您默认设置发送了测试报告')
                        return redirect(url_for('home.test_rep'))
                    flash(u'无法完成,需要去您的个人设置去设置一个默认的邮件发送')
                    return redirect(url_for('home.yongli'))
                if f_dingding == 'dingding':
                    send = send_ding(content="多用例测试已经完成,通过用例:%s,失败用例:%s,详情见测试报告" % (result_pass, result_fail),
                                     Dingtalk_access_token=Dingtalk_access_token)
                    if send is True:
                        flash(u'测试报告已经发送钉钉讨论群,测试报告已经生成!')
                        return redirect(url_for('home.yongli'))
                    flash(u'测试报告发送钉钉讨论群失败!请检查相关配置!')
                    return redirect(next or url_for('home.yongli'))
                flash(u'测试已经完成,测试报告已经生成')
                return redirect(url_for('home.test_rep'))
            except Exception as e:
                flash(u'测试失败,出错原因:%s' % e)
                return redirect(next or url_for('home.yongli'))
        return redirect(url_for('home.yongli'))
class MakeonlyoneCase(MethodView):
    """Debug-run a single interface test case against a chosen environment.

    JSON body: ``{"caseid": <id>, "url": <environment url>}``. Supports 'http'
    (with optional dependency injection and database assertion) and 'dubbo'
    cases. On every outcome the case's debug flags are updated:
    ``Interface_is_tiaoshi`` = "has been debugged",
    ``Interface_tiaoshi_shifou`` = True means the debug run FAILED.
    """

    @login_required
    def post(self):
        projec = request.get_json()
        case_id = projec['caseid']
        url = projec['url']
        testevent = Interfacehuan.query.filter_by(url=str(url)).first()
        if not testevent:
            return jsonify({'code': 41, 'msg': '请确定你所选择的测试环境是否真实存在!'})
        case = InterfaceTest.query.filter_by(id=int(case_id), status=False).first()
        if not case:
            return jsonify({'code': 42, 'msg': '请确定你要测试的用力是否存在!'})
        try:
            if case.interface_type == 'http':
                # --- dependency resolution: pid names an upstream case whose
                # cached result supplies the value of field ``getattr_p`` ---
                if case.pid is not None and case.pid != 'None' and case.pid != '':
                    # NOTE(review): ``case.id + "&" + url`` raises TypeError if
                    # ``id`` is an int — presumably it is a string column, or
                    # this relies on the outer except. Confirm.
                    tesyi = get_reslut(key=case.id + "&" + url)
                    if tesyi is not None:
                        canshu = case.getattr_p
                        try:
                            # Double eval: the cached value is a repr of a repr.
                            testres = eval(tesyi.decode('utf-8'))
                            yilaidata = eval(testres)[canshu]
                        except Exception as e:
                            case.Interface_is_tiaoshi = True
                            case.Interface_tiaoshi_shifou = True
                            db.session.commit()
                            return jsonify({'code': 44, 'msg': '获取依赖数据失败,原因:%s' % e})
                        try:
                            # Inject the dependency value into the request params.
                            pasrms = eval(case.Interface_pase)
                            pasrms.update({canshu: yilaidata})
                        except:
                            case.Interface_is_tiaoshi = True
                            case.Interface_tiaoshi_shifou = True
                            db.session.commit()
                            return jsonify({'code': 45, 'msg': '测试参数应该是字典格式!'})
                    else:
                        # No cached upstream result: run with the raw params.
                        try:
                            pasrms = eval(case.Interface_pase)
                        except:
                            case.Interface_is_tiaoshi = True
                            case.Interface_tiaoshi_shifou = True
                            db.session.commit()
                            return jsonify({'code': 46, 'msg': '测试参数应该是字典格式!'})
                else:
                    # Case has no dependency at all.
                    try:
                        pasrms = eval(case.Interface_pase)
                    except:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 47, 'msg': '测试参数应该是字典格式!'})
                # --- build request headers; 'host' always points at the
                # selected environment ---
                new_headers = case.Interface_headers
                if new_headers == 'None':
                    ne = {'host': url}
                elif new_headers is None:
                    ne = {'host': url}
                else:
                    try:
                        ne = eval(new_headers)
                        ne['host'] = url
                    except:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 48, 'msg': '测试的请求头应该是字典格式的!'})
                # --- optional database-side assertion: validate config, connect,
                # and run the case's SQL; otherwise skip with an empty result ---
                if case.is_database is True:
                    if case.chaxunshujuku is None or case.databaseziduan is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 49, 'msg': '要判断数据库但是没有找到数据库的语句或者断言的字段!'})
                    if testevent.database is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 50, 'msg': '测试环境数据库url配置不存在'})
                    if testevent.dbport is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 51, 'msg': '测试环境数据库port配置不存在'})
                    if testevent.dbhost is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 52, 'msg': '测试环境数据库host配置不存在'})
                    if testevent.databaseuser is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 53, 'msg': '测试环境数据库登录user配置不存在'})
                    if testevent.databasepassword is None:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 54, 'msg': '测试环境数据库登录密码配置不存在'})
                    conncts = cursemsql(host=testevent.dbhost, port=testevent.dbport,
                                        user=testevent.databaseuser, password=testevent.databasepassword,
                                        database=testevent.database)
                    if conncts['code'] == 0:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 55, 'msg': '链接数据库出现问题,原因是:%s' % conncts['error']})
                    else:
                        result_myql = excemysql(conne=conncts['conne'], Sqlmy=case.chaxunshujuku)
                        if result_myql['code'] == 0:
                            case.Interface_is_tiaoshi = True
                            case.Interface_tiaoshi_shifou = True
                            db.session.commit()
                            # NOTE(review): reports conncts['error'] here; the
                            # query error is in result_myql — likely a bug, but
                            # left as-is in this documentation pass.
                            return jsonify({'code': 56, 'msg': '查询数据库出现问题,原因是:%s' % conncts['error']})
                        mysql_result = result_myql['result']
                else:
                    mysql_result = []
                # --- fire the HTTP request and assert on response + database ---
                try:
                    # NOTE(review): ``pasrms`` is already the eval'd object from
                    # above; eval()-ing it again only succeeds if Interface_pase
                    # is double-encoded (repr of a repr). Confirm the data shape.
                    data = eval(pasrms)
                except Exception as e:
                    case.Interface_is_tiaoshi = True
                    case.Interface_tiaoshi_shifou = True
                    db.session.commit()
                    return jsonify({'code': 57, 'msg': '转化请求参数失败,原因:%s' % e})
                # NOTE(review): uses ``case.interface_id`` here while sibling
                # views use ``case.interfaces`` — verify the relationship name.
                me = Api(url=case.interface_id.Interface_url, fangshi=case.Interface_meth,
                         params=data, headers=ne)
                result = me.getJson()
                spend = me.spend()
                return_mysql = pare_result_mysql(mysqlresult=mysql_result,
                                                 return_result=result, paseziduan=case.databaseziduan)
                retur_re = assert_in(case.Interface_assert, result)
                try:
                    # pass requires BOTH the response assertion and the DB check;
                    # the result is cached so dependent cases can consume it.
                    if retur_re == 'pass' and return_mysql['result'] == 'pass':
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = False
                        save_reslut(key=case.id + "&" + url, value=str(result))
                        return jsonify({'code': 200, 'msg': '测试用例调试通过!'})
                    elif retur_re == 'fail' or return_mysql['result'] == 'fail':
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        save_reslut(key=case.id + "&" + url, value=str(result))
                        return jsonify({'code': 58, 'msg': '测试用例测试失败,请检查用例!'})
                    else:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        save_reslut(key=case.id + "&" + url, value=str(result))
                        return jsonify({'code': 59, 'msg': '测试返回异常,,请检查用例!'})
                except Exception as e:
                    case.Interface_is_tiaoshi = True
                    case.Interface_tiaoshi_shifou = True
                    save_reslut(key=case.id + "&" + url, value=str(result))
                    return jsonify({'code': 60, 'msg': u'用例测试失败,失败原因:{},请检查测试用例'.format(e)})
            elif case.interface_type == 'dubbo':
                try:
                    data = eval(case.Interface_pase)
                except Exception as e:
                    case.Interface_is_tiaoshi = True
                    case.Interface_tiaoshi_shifou = True
                    # NOTE(review): this TestcaseResult is never added to the
                    # session, so commit() cannot persist it — confirm intent.
                    new_testre = TestcaseResult(case_id=case)
                    new_testre.result = str("转换参数失败")
                    new_testre.testevir = url
                    new_testre.by = False
                    db.session.commit()
                    return jsonify({'code': 61, 'msg': '转化请求参数失败,原因:%s' % e})
                dubboapi = DubboInterface(url=case.Interface_url, interface=case.Interface_pase,
                                          method=case.Interface_meth,
                                          param=case.Interface_headers, **(data))
                dubboapireslu = dubboapi.getresult()
                if case.saveresult is True:
                    new_testre = TestcaseResult(case_id=case.id)
                    new_testre.result = str(dubboapireslu)
                    db.session.add(new_testre)
                    db.session.commit()
                if dubboapireslu['code'] == 0:
                    # NOTE(review): json.loads() over a dict raises TypeError;
                    # presumably ``dubboapireslu['result']`` (a JSON string) was
                    # intended — confirm against DubboInterface.getresult().
                    assert_re = assert_in(asserqiwang=case.Interface_assert,
                                          fanhuijson=json.loads(dubboapireslu))
                    if assert_re == 'pass':
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = False
                        db.session.commit()
                        return jsonify({'code': 200, 'msg': '测试用例调试通过!'})
                    elif assert_re == 'fail':
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 58, 'msg': '测试用例测试失败,请检查用例!'})
                    else:
                        case.Interface_is_tiaoshi = True
                        case.Interface_tiaoshi_shifou = True
                        db.session.commit()
                        return jsonify({'code': 59, 'msg': '测试返回异常,,请检查用例!'})
                elif dubboapireslu['code'] == 1:
                    case.Interface_is_tiaoshi = True
                    case.Interface_tiaoshi_shifou = True
                    db.session.commit()
                    return jsonify({'code': 63, 'msg': '接口测试出错了!原因:%s' % dubboapireslu['result']})
                else:
                    case.Interface_is_tiaoshi = True
                    case.Interface_tiaoshi_shifou = True
                    db.session.commit()
                    return jsonify({'code': 630, 'msg': 'dubbo接口测试返回异常,请检查dubbo测试接口'})
            else:
                return jsonify({'code': 62, 'msg': '目前还不支持你所选择的类型的协议!'})
        except Exception as e:
            # Catch-all: mark the case as debugged-and-failed, then surface the error.
            case.Interface_is_tiaoshi = True
            case.Interface_tiaoshi_shifou = True
            db.session.commit()
            return jsonify({'code': 63, 'msg': '接口测试出错了!原因:%s' % e})
class DaochuCase(MethodView):
    """Export every live interface case of the selected project as an .xls download."""

    @login_required
    def post(self):
        requested_name = request.form.get('interface_type')
        matched_project = Project.query.filter_by(project_name=str(requested_name), status=False).first()
        if matched_project is None:
            flash('你选择导出接口的项目不存在')
            return redirect(url_for('home.interface'))
        cases = InterfaceTest.query.filter_by(projects_id=matched_project.id, status=False).all()
        # The export file is named after today's date inside app/upload.
        stamp = time.strftime("%Y%m%d", time.localtime(time.time()))
        export_dir = os.getcwd() + '/app/upload'
        export_path = os.path.join(export_dir, (stamp + '.xls'))
        if os.path.exists(export_path) is False:
            os.system('touch %s' % export_path)
        outcome = create_interface_case(filename=export_path, caselist=cases)
        if outcome['code'] == 1:
            flash('导出接口失败!原因:%s' % outcome['error'])
            return redirect(url_for('home.yongli'))
        return make_response(send_from_directory(export_dir, filename=stamp + '.xls', as_attachment=True))
class OnecaseDetial(MethodView):
    """Return every stored debug result (result text, date, environment, latency)
    for one interface test case, identified by the raw request body."""

    @login_required
    def post(self):
        case_id = request.get_data().decode('utf-8')
        case_one = InterfaceTest.query.filter_by(id=int(case_id)).first()
        if not case_one:
            return jsonify({'code': 99, 'messgage': '没有找到你需要的测试用例', 'data': ''})
        test_result = TestcaseResult.query.filter_by(case_id=case_one.id).all()
        # An empty list is falsy, so the extra len() <= 0 test was redundant.
        if not test_result:
            return jsonify({'code': 101, 'messgage': '您的测试用例没有在任何环境调试过', 'data': ''})
        # Bug fix: compare with None via identity (``== None`` is unidiomatic and
        # only worked by accident); default missing fields explicitly.
        result_all = [{'result': rest_one.result,
                       'date': rest_one.date.strftime('%Y-%m-%d %H:%M:%S'),
                       'event': rest_one.ceshihuanjing if rest_one.ceshihuanjing is not None else '',
                       'spend': rest_one.spend if rest_one.spend is not None else 0}
                      for rest_one in test_result]
        return jsonify({'code': 200, 'messgage': '请求成功', 'data': result_all})
| 53.633785
| 119
| 0.508702
|
4a089cf4227c19fbde89d3ed922077313a5cbb32
| 16,015
|
py
|
Python
|
Src Code/baseline.py
|
kautsiitd/Unsupervised-Decomposition-of-a-Multi-Author-Document
|
33dab2dce3de2c08c7ae1a34646059653e2fcccc
|
[
"MIT"
] | 4
|
2017-09-26T19:39:06.000Z
|
2020-05-14T14:54:55.000Z
|
Src Code/baseline.py
|
kautsiitd/Unsupervised-Decomposition-of-a-Multi-Author-Document
|
33dab2dce3de2c08c7ae1a34646059653e2fcccc
|
[
"MIT"
] | null | null | null |
Src Code/baseline.py
|
kautsiitd/Unsupervised-Decomposition-of-a-Multi-Author-Document
|
33dab2dce3de2c08c7ae1a34646059653e2fcccc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# encoding=utf8
from __future__ import unicode_literals
'''#############################'''
'''#####Importing Libraries#####'''
'''#############################'''
import sys
import os
import io
import re
import pickle
from random import randint as rnd
from random import shuffle
from itertools import groupby
from sklearn.feature_extraction.text import CountVectorizer as CV
from sklearn.mixture import GMM
from pprint import pprint
import operator
import math
import nltk
from sklearn.naive_bayes import MultinomialNB as BNB
import itertools
import numpy as np
from pos_tag import pos_tagging
# from pq_gram import find_pq_grams
print "Import Done"
'''#############################'''
'''#####Importing Libraries#####'''
'''#############################'''
# variables
V = 200  # upper bound on the random run length (sentences) taken from one author at a time
data_type = "Original"
b_num = 0  # index into ``b`` selecting which merged corpus to build
b = ["Becker-Posner","GC-TF-PK","MD-TF-PK","MD-GC-PK","MD-GC-TF-PK"]
gmm_initialisation = 5  # GMM restarts (passed as n_init)
# number of most frequent features
max_features = 1500
# segment size
seg_size = 30  # sentences per segment
# choosing top vital segment in a class
best_per = .8  # fraction of segments kept as "vital" per cluster
# number of sentence to test on from final model
n_gram_size = 1
lowercase = True
tokenizer = None
token_pattern = u'(?u)\\b\\w\\w+\\b'
# threshold for trusted sentence
trus_thrs = .95  # min probability margin between the top two authors
'''###########################'''
'''##########Step 1###########'''
'''extracting and merging data'''
'''###########################'''
'''
books_names = ['a',b','c',....]
merged_data = ['sentence1','sentence2',.....]
label_sen = [0,0,0,1,2,.....] label of sentence in merged_data
segments = ['segment1','segment2',.....]
label_in_seg= [[0,0,0,1,2,0,0,..]
[0,1,1,2,1,0,1,..]
....
] label of sentences in individual segments
label_seg = [0,1,1,0,2,0,...] book with max count in segment
'''
# STEP 1
# extracting and merging data
# variables
# Eze-Job
# NewYorkTimesArticles
# MD-TF
# GC-MD
# Becker-Posner
# GC-TF-PK
# MD-TF-PK
# MD-GC-PK
# MD-GC-TF-PK
folder = "dataset/"+data_type+"/"+b[b_num]
books_names = os.listdir(folder)
merged_data = []
label_sen = []
segments = []
label_seg = []
label_in_seg= []
# main
# Read every author's book, then interleave random-length runs of sentences
# from the authors into one merged document, remembering the true label of
# each sentence; finally cut the merged document into fixed-size segments.
number_books= len(books_names)
books_data = []
for book in books_names:
    path = os.path.join(folder,book)
    f = io.open(path, encoding="ISO-8859-1")
    books_data.append(f.readlines())
number_sen = [len(book_data) for book_data in books_data]
total_sen = sum(number_sen)
# NOTE(review): Python 2 integer division floors before ceil(), so trailing
# sentences past number_seg*seg_size are dropped — confirm this is intended.
number_seg = int(math.ceil((total_sen/seg_size)))
count_sen = [0]*number_books
while(sum(count_sen) != total_sen):
    size = rnd(1,V)  # run length for this round, 1..V sentences per author
    done_book = [0]*number_books
    for i in range(number_books):
        # Pick each author exactly once per round, in random order.
        book_num = rnd(0,number_books-1)
        while(done_book[book_num] != 0):
            book_num = rnd(0,number_books-1)
        done_book[book_num] = 1
        new_count_sen = count_sen[book_num] + min(size,number_sen[book_num]-count_sen[book_num])
        for j in books_data[book_num][ count_sen[book_num]:new_count_sen ]:
            merged_data.append( re.sub('[\r\n]','',j) )
        label_sen.extend([book_num] * (new_count_sen - count_sen[book_num]) )
        count_sen[book_num] = new_count_sen
# Slice the merged sentence stream into segments of seg_size sentences.
for i in range(number_seg):
    start = seg_size*i
    end = min(seg_size*(i+1),total_sen)
    seg_data = merged_data[start:end]
    segments.append(' '.join(seg_data))
    labels = label_sen[start:end]
    label_in_seg.append(labels)
# A segment's label is its majority author.
for i in range(number_seg):
    label_seg.append(max(set(label_in_seg[i]), key=label_in_seg[i].count))
'''######'''
'''Step 1'''
'''######'''
'''###########################'''
'''Printing Results of merging'''
'''###########################'''
'''
org_seg = [430,405,...,150] number of pure segments by author i, last one for mixed
'''
# calculating segments by each author
org_seg = [0 for i in range(number_books+1)]
for i in range(number_seg):
if( sum(label_in_seg[i])%len(label_in_seg[i]) == 0):
org_seg[ sum(label_in_seg[i])/len(label_in_seg[i]) ] += 1
else:
org_seg[-1] += 1
for i in range(number_books):
print "Author "+str(i)+":",org_seg[i]
print "Mixed :",org_seg[-1]
print "STEP 1 done"
'''###########################'''
'''Printing Results of merging'''
'''###########################'''
'''#########################################'''
'''#################Step 2##################'''
'''finding features and vectorising segments'''
'''#########################################'''
'''
model = model with feature words having atleast frequency = 3 = 11000
vec_seg(sparse matrix) = [ [0,0,1,1,0,1,1,1,1,0,0,0,0,1,1,... number of feature words=11000]
[0,0,1,0,0,1,1,0,1,0,0,1,1,0,0,... whether word present or not]
....
number of segments
]
number_f_w = number of feature words extracted from merged data
'''
# Binary bag-of-words over (1..n_gram_size)-grams that occur in at least 3
# documents, capped at 20000 terms; fitted on sentences, applied to segments.
model = CV(binary = True, min_df = 3, ngram_range=(1,n_gram_size), max_features=20000, lowercase=lowercase, tokenizer=tokenizer, token_pattern=token_pattern)
model = model.fit(merged_data)
vec_seg = model.transform(segments)
number_f_w = len(model.vocabulary_)
vec_seg = vec_seg.toarray()
# Clamp the requested feature budget to the vocabulary actually extracted.
max_features = min(max_features,number_f_w)
print "number of feature words:",number_f_w
print "STEP 2 done"
'''######'''
'''Step 2'''
'''######'''
'''############################################'''
'''#################Step 3#####################'''
'''Unsupervised labelling of segments using GMM'''
'''############################################'''
'''
label_p = [0,1,0,1,2,0,1,.... number of segments] predicted label for each segment
count_mapping = [[20,3,450,... number of books] how much predicted label match to original label(max count)
[410,5,10,..]
...
number of books
]
mapping = [2,0,1,5,3,...] What predicted label match to in original label
clusters = [['sentence','sentence',..... in cluster 0]
['sentence','sentence',..... in cluster 1]
....
number of books
]
'''
# Fit a diagonal-covariance GMM repeatedly until the hard cluster assignments
# map one-to-one onto the true authors (majority vote per cluster).
# NOTE(review): this loop never terminates if two clusters always share the
# same majority author — consider a retry limit.
mapping = [0 for i in range(number_books)]
while(len(set(mapping)) != number_books):
    model1 = GMM(n_components = number_books, n_iter = 1000, covariance_type = 'diag', n_init = gmm_initialisation, verbose = 1)
    model1 = model1.fit(vec_seg)
    label_p = model1.predict_proba(vec_seg)
    temp_label_p = []
    for j in range(number_seg):
        # argmax over cluster posteriors (map(lambda x: (x), ...) is just list()).
        temp_label_p.append(map(lambda x: (x),label_p[j]) . index(max(label_p[j])))
    label_p = temp_label_p
    # count_mapping[cluster][author] = segments of that author in that cluster.
    count_mapping = [ [0 for j in range(number_books)] for i in range(number_books)]
    for i,j in zip(label_p,label_seg):
        count_mapping[i][j] += 1
    for i in range(number_books):
        max_frq = max(count_mapping[i])
        mapping[i] = count_mapping[i].index(max_frq)
    print "mapping:",mapping
    print "count_mapping:",count_mapping
# updating label_p with mapping
for i in range(number_seg):
    label_p[i] = mapping[label_p[i]]
# segments in each clusters as sentences
clusters = [[] for i in range(number_books)]
for i in range(number_seg):
    clusters[label_p[i]].append(segments[i])
'''######'''
'''Step 3'''
'''######'''
'''################################'''
'''Calculating Precision and Recall'''
'''################################'''
# Rows of the confusion matrix are predicted clusters, columns are true authors.
confusion_matrix = [ [0 for j in range(number_books)] for i in range(number_books)]
for i in range(number_seg):
    confusion_matrix[label_p[i]][label_seg[i]] += 1
recall = []
for i in range(number_books):
    # Diagonal count over the column (true-class) total = per-author recall.
    recall.append(float(confusion_matrix[i][i])/sum(zip(*confusion_matrix)[i]))
print "Recall:",recall
print float(sum(recall))/number_books
print "mapping:",mapping
print "confusion_matrix:",confusion_matrix
print "STEP 3 done"
'''################################'''
'''Calculating Precision and Recall'''
'''################################'''
'''############################################################'''
'''######################Step 4################################'''
'''Revectorising segments with max_features most frequent words'''
'''############################################################'''
'''
model2 = model with at most max_features=1500 feature words
vec_seg_cls(sparse matrix) = [[ [0,1,1,0,1,1,1,0,..... max_features=1500],[vector of segment 2],.... cluster 0]
[ [0,0,1,1,0,0,1,0,..... max_features=1500],[vector of segment 2],.... cluster 1]
....
number of books
] vector representation of each segment in corresponding cluster
vec_seg_new(sparse matrix) = [[0,1,1,0,1,1,1,0,..... max_features=1500]
[0,0,1,1,0,0,1,0,..... max_features=1500]
....
number of segments
] vector representation of each segment
'''
model2 = CV(ngram_range=(1,n_gram_size), max_features = max_features)
model2 = model2.fit(merged_data)
vec_seg_cls = [model2.transform(clusters[i]) for i in range(number_books)]
vec_seg_new = model2.transform(segments)
print "STEP 4 done"
'''#####################################'''
'''###############Step 5################'''
'''Applying SegmentElicitation Procedure'''
'''#####################################'''
'''
vec_seg_cls(dense) = vector representation of each segment in corresponding cluster
vec_seg_new(dense) = vector representation of each segment
word_cls_frq = frequency of feature words(max_features=1500) in each cluster
= [[25,100,13,15,253,.... number of feature words] cluster 0
[65,200,123,10,15,.... number of feature words] cluster 1
....
number of clusters/books
]
word_frq = each feature word(max_features=1500) frequency in whole document
= [150,550,260,1021,.... number of feature words(max_features=1500)]
post_p_w = posterior probability of each feature word in each cluster/book
= [ [0.3,0.25,.... number of clusters/books] word 1
[0.1,0.15,.... number of clusters/books] word 2
....
number of feature words(max_features=1500)
]
post_p_seg = posterior probability of each segment in each cluster
= [ [[0.85,0.01,0.1,... number of books,0(segment number)], [segment 2],.... number of segmensin this cluster] cluster 1
[[0.85,0.01,0.1,... number of books,1(segment number)], [segment 2],.... number of segmensin this cluster] cluster 2
....
number of clusters/books
]
best_seg = 80% of post_p_seg for each cluster in same format
= [ [[0.85,0.01,0.1,... number of books,0(segment number)], [segment 2],.... number of segments in this cluster] cluster 1
[[0.85,0.01,0.1,... number of books,1(segment number)], [segment 2],.... number of segments in this cluster] cluster 2
....
number of clusters/books
]
'''
# calculating posterior probability of words
# variables
post_p_w = []
dense_array = [i.toarray() for i in vec_seg_cls]
dense_array1 = vec_seg_new.toarray()
# Per-cluster frequency of each of the max_features words, and the totals.
word_cls_frq = [[sum(word_f) for word_f in zip(*cluster)] for cluster in dense_array]
word_frq = [sum(word_f) for word_f in zip(*word_cls_frq)]
# main
for i in range(max_features):
    post_p_w.append([])
    for j in range(number_books):
        # P(cluster j | word i) estimated as cluster frequency / total frequency.
        post_p_w[i].append(float(word_cls_frq[j][i])/word_frq[i])
# calculating posterior probability of segments in each cluster
post_p_seg = [[] for i in range(number_books)]
# jth segment ith cluster
for j in range(number_seg):
    cls_num = label_p[j]
    temp = []
    for i in range(number_books):
        # Sum of log posteriors over the feature words present in segment j
        # (zero-probability words are skipped to avoid log(0)).
        summation = 0
        for k in range(max_features):
            if (dense_array1[j][k]>0 and post_p_w[k][i]>0):
                summation += math.log(post_p_w[k][i])
        temp.append(summation)
    # The segment's global index rides along as the last element.
    temp.append(j)
    post_p_seg[cls_num].append(temp)
# print post_p_seg[cls_num][-1]
# print post_p_seg[cls_num][-1]
'''################finding vital segment for each cluster####################'''
'''Choosing best 80%(best_per) of segments to represent corresponding cluster'''
'''##########################################################################'''
best_seg = []
for i in range(number_books):
    end = int(best_per*len(post_p_seg[i]))
    # Rank segments by the margin between the best competing cluster's
    # log-likelihood and their own cluster's; keep the top best_per fraction
    # (x[-1] is the segment index, hence the [:-1] slice).
    sort_seg = sorted(post_p_seg[i], key=lambda x:-x[i]+max(x[:i]+x[i+1:-1]))
    best_seg.append(sort_seg[:end])
print "STEP 5 done"
'''######'''
'''Step 5'''
'''######'''
'''#################################################################################################'''
'''#########################################Step 6##################################################'''
'''Representing vital segments in form of minimum 3 frq feature words for each corresponding cluster'''
'''#################################################################################################'''
'''
vec_seg(dense) = vector representation of each segment
vital_seg = [ [ [0,1,1,0,0,1,1,1,0,0,0,... ~1500 max_features(=1500)], [0,1,1,0,0,1,1,1,0,0,0,...],.... number of vital segments] cluster 0
[ [0,1,1,0,0,1,1,1,0,0,0,... ~1500 max_features(=1500)], [0,1,1,0,0,1,1,1,0,0,0,...],.... number of vital segments] cluster 1
....
number of clusters
]
'''
# NOTE(review): duplicate progress message — "STEP 5 done" was already printed above.
print "STEP 5 done"
vital_seg = []
for cluster_n in range(number_books):
    vital_seg.append([])
    for seg in best_seg[cluster_n]:
        # seg[-1] carries the segment's global index; look up its feature vector.
        vital_seg[cluster_n].append(dense_array1[seg[-1]])
print "STEP 6 done"
'''######'''
'''Step 6'''
'''######'''
'''###############################################################################'''
'''#################################Step 7########################################'''
'''Training using Bernouli Naive-Bayesian model to learn a classifier on vital_seg'''
'''###############################################################################'''
'''
vital_seg = 2*number of vital_seg*1500
train = 2*number of vital_seg*1500
'''
train = []
labels= []
for cluster_n in range(number_books):
for seg in vital_seg[cluster_n]:
train.append(seg.tolist())
labels.append(cluster_n)
model3 = BNB(fit_prior = True)
model3 = model3.fit(train, labels)
print "STEP 7 done"
'''######'''
'''Step 7'''
'''######'''
'''################################################################'''
'''##########################Step 8################################'''
'''classfying sentences on trained classifier and calculating score'''
'''################################################################'''
'''
auth_proba = [[.22, .05, .12, ... number of authors]
[.22, .05, .12, ... number of authors]
...
test_size
] probability of a sentence written by authors
'''
# Classify every sentence of the merged document with the trained classifier.
test_size = len(merged_data)
vec_sen = model2.transform(merged_data[:test_size])
auth_proba = model3.predict_proba(vec_sen)
# argmax over author probabilities (map(lambda x: (x), ...) is just list()).
predicted = [map(lambda x: (x),auth_proba[i]).index(max(auth_proba[i])) for i in range(test_size)]
org_label = label_sen[:test_size]
print model3.score(vec_sen, org_label)
print "STEP 8 done"
'''######'''
'''Step 8'''
'''######'''
'''#########################################'''
'''################Step 9###################'''
'''Applying Probability Indication Procedure'''
'''#########################################'''
# Rule 1
# A sentence is "trusted" only when the gap between the two most probable
# authors exceeds trus_thrs; otherwise mark it -1 (undecided).
is_trusted = []
for i in range(test_size):
    temp = sorted(auth_proba[i])
    if(temp[-1]-temp[-2] > trus_thrs):
        is_trusted.append(predicted[i])
    else:
        is_trusted.append(-1)
# Rule 2
# Anchor the first sentence: copy the first trusted label forward, or fall
# back to the classifier's argmax if nothing is trusted at all.
if(is_trusted[0] == -1):
    for i in range(1,test_size):
        if(is_trusted[i] != -1):
            is_trusted[0] = is_trusted[i]
            break
    if(is_trusted[0] == -1):
        is_trusted[0] = map(lambda x: (x),auth_proba[0]).index(max(auth_proba[0]))
# Rule 3
# Symmetrically anchor the last sentence.
if(is_trusted[-1] == -1):
    for i in range(test_size-1, -1, -1):
        if(is_trusted[i] != -1):
            is_trusted[-1] = is_trusted[i]
            break
    if(is_trusted[-1] == -1):
        is_trusted[-1] = map(lambda x: (x),auth_proba[-1]).index(max(auth_proba[-1]))
# Rule 4 & 5
# Fill each run of undecided sentences: if the trusted labels on both sides
# agree, take that label; otherwise split the run in half between them.
# (Rules 2/3 guarantee the inner while always finds a trusted right neighbour
# before running off the end; the outer for revisits filled indices, which is
# harmless since they are no longer -1.)
before_label = -1
for i in range(test_size):
    if(is_trusted[i] != -1):
        before_label = is_trusted[i]
    else:
        after_label = -1
        start = i
        end = i
        while(i < test_size):
            i += 1
            if(is_trusted[i] != -1):
                after_label = is_trusted[i]
                end = i
                break
        if(before_label == after_label):
            for j in range(start,end):
                is_trusted[j] = before_label
        else:
            for j in range(start,(start+end)/2):
                is_trusted[j] = before_label
            for j in range((start+end)/2,end):
                is_trusted[j] = after_label
print "STEP 9 done"
'''######'''
'''Step 9'''
'''######'''
# Checking New Score
correct = 0
for i in range(test_size):
if(org_label[i] == is_trusted[i]):
correct += 1
print "New Accuracy:",float(correct*100)/test_size,"%"
| 34.589633
| 160
| 0.578957
|
4a089d011f8cd9601cf69e367ba56d3a4190aa0d
| 2,359
|
py
|
Python
|
reki_data_tool/postprocess/grid/gfs/ne/case/case_dask_v1.py
|
perillaroc/reki-data-tool
|
047424a2f8a1f0e16684bffaeded4044366f63c0
|
[
"MIT"
] | null | null | null |
reki_data_tool/postprocess/grid/gfs/ne/case/case_dask_v1.py
|
perillaroc/reki-data-tool
|
047424a2f8a1f0e16684bffaeded4044366f63c0
|
[
"MIT"
] | null | null | null |
reki_data_tool/postprocess/grid/gfs/ne/case/case_dask_v1.py
|
perillaroc/reki-data-tool
|
047424a2f8a1f0e16684bffaeded4044366f63c0
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typer.testing import CliRunner
from loguru import logger
from reki_data_tool.postprocess.grid.gfs.ne.create_task import app
CASE_BASE_DIRECTORY = "/g11/wangdp/project/work/data/playground/operation/gfs/ne/case/11-dask-v1"
runner = CliRunner()
def test_dask_v1():
    """Generate dask-v1 batch scripts for several node counts.

    For each node count in ``nodes_list`` and each of ``count`` repetitions,
    invokes the ``dask-v1`` sub-command of the ``create_task`` CLI app to
    render a job script and a matching working directory under
    CASE_BASE_DIRECTORY.
    """
    nodes_list = (1, 2, 4, 8)
    count = 20
    partition = "normal"
    script_base_directory = Path(CASE_BASE_DIRECTORY, "script")
    for node_count in nodes_list:
        for test_index in range(1, count + 1):
            logger.info(f"create job script for NODE {node_count} TEST {test_index}...")
            script_path = Path(script_base_directory, f"node_{node_count:02}", f"test_{test_index:02}.cmd")
            script_path.parent.mkdir(parents=True, exist_ok=True)
            work_dir = Path(CASE_BASE_DIRECTORY, f"node_{node_count:02}", f"test_{test_index:02}")
            work_dir.mkdir(parents=True, exist_ok=True)
            # click expects command-line arguments as strings; passing Path
            # or int objects to CliRunner.invoke breaks option parsing.
            result = runner.invoke(app, [
                "dask-v1",
                "--output-script-path", str(script_path.absolute()),
                "--work-directory", str(work_dir.absolute()),
                "--nodes", str(node_count),
                "--partition", partition,
            ])
            # Surface failures instead of silently discarding the result.
            if result.exit_code != 0:
                logger.warning(f"invoke failed with exit code {result.exit_code}")
def test_dask_v1_less_than_one_node():
    """Generate dask-v1 batch scripts on a single node with varying task counts.

    Mirrors test_dask_v1 but keeps one node and sweeps ntasks-per-node over
    ``tasks_per_node_list``, writing scripts/work dirs per task count.
    """
    nodes = 1
    tasks_per_node_list = (1, 2, 4, 8, 16, 32)
    count = 20
    partition = "normal"
    script_base_directory = Path(CASE_BASE_DIRECTORY, "script")
    for tasks_per_node in tasks_per_node_list:
        for test_index in range(1, count + 1):
            logger.info(f"create job script for TASKS {tasks_per_node} TEST {test_index}...")
            script_path = Path(script_base_directory, f"task_{tasks_per_node:02}", f"test_{test_index:02}.cmd")
            script_path.parent.mkdir(parents=True, exist_ok=True)
            work_dir = Path(CASE_BASE_DIRECTORY, f"task_{tasks_per_node:02}", f"test_{test_index:02}")
            work_dir.mkdir(parents=True, exist_ok=True)
            # click expects command-line arguments as strings; passing Path
            # or int objects to CliRunner.invoke breaks option parsing.
            result = runner.invoke(app, [
                "dask-v1",
                "--output-script-path", str(script_path.absolute()),
                "--work-directory", str(work_dir.absolute()),
                "--nodes", str(nodes),
                "--ntasks-per-node", str(tasks_per_node),
                "--partition", partition,
            ])
            print(result)
# Script entry point: only the multi-node sweep runs by default; the
# single-node task sweep must be invoked explicitly.
if __name__ == "__main__":
    test_dask_v1()
| 34.691176
| 111
| 0.621874
|
4a089e80105c65608380b60a6d3ca211238e6981
| 461
|
py
|
Python
|
rusentrel/classic_cv/ctx/att_cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | null | null | null |
rusentrel/classic_cv/ctx/att_cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2020-12-16T18:21:11.000Z
|
2020-12-30T10:08:27.000Z
|
rusentrel/classic_cv/ctx/att_cnn.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2021-03-29T20:58:26.000Z
|
2021-03-29T20:58:26.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../')
from rusentrel.classic.ctx.att_cnn import run_testing_att_cnn
from rusentrel.classic_cv.common import CV_COUNT, \
classic_cv_common_callback_modification_func, \
CV_NAME_PREFIX
if __name__ == "__main__":
    # Run the attention-CNN experiment with the cross-validation settings
    # (CV name prefix, fold count and CV-specific callback tweaks).
    run_testing_att_cnn(
        name_prefix=CV_NAME_PREFIX,
        cv_count=CV_COUNT,
        custom_callback_func=classic_cv_common_callback_modification_func)
| 25.611111
| 74
| 0.741866
|
4a089efb03d7542cac9a67d7b1bd45243947e037
| 4,799
|
py
|
Python
|
test/functional/zmq_test.py
|
tedy5/Ravencoin
|
c8fa05754f61f4da464675e34b4c2eb5129dac33
|
[
"MIT"
] | 85
|
2018-01-28T11:36:04.000Z
|
2022-03-12T01:50:34.000Z
|
test/functional/zmq_test.py
|
tedy5/Ravencoin
|
c8fa05754f61f4da464675e34b4c2eb5129dac33
|
[
"MIT"
] | 3
|
2018-02-05T03:04:33.000Z
|
2018-02-08T04:03:57.000Z
|
test/functional/zmq_test.py
|
tedy5/Ravencoin
|
c8fa05754f61f4da464675e34b4c2eb5129dac33
|
[
"MIT"
] | 39
|
2018-02-24T21:01:54.000Z
|
2021-08-15T16:05:02.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import configparser
import os
import struct
from test_framework.test_framework import RavenTestFramework, SkipTest
from test_framework.util import (assert_equal,
bytes_to_hex_str,
hash256,
hash_block,
)
class ZMQSubscriber:
    """Wraps a ZMQ SUB socket for a single topic and validates ordering."""

    def __init__(self, socket, topic):
        self.socket = socket
        self.topic = topic
        self.sequence = 0

        import zmq
        self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)

    def receive(self):
        """Receive one multipart message and return its body.

        Asserts that the topic matches this subscriber and that the
        little-endian uint32 sequence counter increments by one per message.
        """
        received_topic, body, raw_seq = self.socket.recv_multipart()
        # The publisher must only deliver our subscribed topic here.
        assert_equal(received_topic, self.topic)
        # Unpack the single little-endian uint32 sequence number.
        (seq_number,) = struct.unpack('<I', raw_seq)
        assert_equal(seq_number, self.sequence)
        self.sequence += 1
        return body
class ZMQTest (RavenTestFramework):
    """Functional test of ravend's ZMQ notification interface.

    Subscribes to all four publisher topics on one shared socket and checks
    that mined blocks and a wallet transaction produce the expected
    notifications, in order.
    """

    def set_test_params(self):
        # Node 0 mines; node 1 verifies and sends the wallet transaction.
        self.num_nodes = 2

    def setup_nodes(self):
        """Skip unless zmq is importable and ravend was built with ZMQ."""
        # Try to import python3-zmq. Skip this test if the import fails.
        try:
            import zmq
        except ImportError:
            raise SkipTest("python3-zmq module not available.")

        # Check that raven has been built with ZMQ enabled.
        config = configparser.ConfigParser()
        if not self.options.configfile:
            self.options.configfile = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config.ini"))
        # NOTE(review): file handle from open() is never closed explicitly;
        # consider a 'with' block.
        config.read_file(open(self.options.configfile))

        if not config["components"].getboolean("ENABLE_ZMQ"):
            raise SkipTest("ravend has not been built with zmq enabled.")

        # Initialize ZMQ context and socket.
        # All messages are received in the same socket which means
        # that this test fails if the publishing order changes.
        # Note that the publishing order is not defined in the documentation and
        # is subject to change.
        address = "tcp://127.0.0.1:28766"
        self.zmq_context = zmq.Context()
        socket = self.zmq_context.socket(zmq.SUB)
        # 60 s receive timeout so a missing notification fails rather than hangs.
        socket.set(zmq.RCVTIMEO, 60000)
        socket.connect(address)

        # Subscribe to all available topics.
        self.hashblock = ZMQSubscriber(socket, b"hashblock")
        self.hashtx = ZMQSubscriber(socket, b"hashtx")
        self.rawblock = ZMQSubscriber(socket, b"rawblock")
        self.rawtx = ZMQSubscriber(socket, b"rawtx")

        # Only node 0 publishes; node 1 gets no -zmqpub options.
        self.extra_args = [["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]], []]
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def run_test(self):
        try:
            self._zmq_test()
        finally:
            # Destroy the ZMQ context.
            self.log.debug("Destroying ZMQ context")
            self.zmq_context.destroy(linger=None)

    def _zmq_test(self):
        """Mine blocks and send a payment, checking every ZMQ notification."""
        num_blocks = 5
        self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
        genhashes = self.nodes[0].generate(num_blocks)
        self.sync_all()

        for x in range(num_blocks):
            # Should receive the coinbase txid.
            txid = self.hashtx.receive()

            # Should receive the coinbase raw transaction.
            # NOTE(review): 'hex' and 'hash' below shadow builtins.
            hex = self.rawtx.receive()
            assert_equal(bytes_to_hex_str(hash256(hex)),
                         self.nodes[1].getrawtransaction(bytes_to_hex_str(txid), True)["hash"])

            # Should receive the generated block hash.
            hash = bytes_to_hex_str(self.hashblock.receive())
            assert_equal(genhashes[x], hash)
            # The block should only have the coinbase txid.
            assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"])

            # Should receive the generated raw block.
            # The first 80 bytes are the block header, which hash_block hashes.
            block = self.rawblock.receive()
            assert_equal(genhashes[x], hash_block(bytes_to_hex_str(block[:80])))

        self.log.info("Wait for tx from second node")
        payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        self.sync_all()

        # Should receive the broadcasted txid.
        txid = self.hashtx.receive()
        assert_equal(payment_txid, bytes_to_hex_str(txid))

        # Should receive the broadcasted raw transaction.
        hex = self.rawtx.receive()
        assert_equal(payment_txid, bytes_to_hex_str(hash256(hex)))
# Standard functional-test entry point.
if __name__ == '__main__':
    ZMQTest().main()
| 38.392
| 148
| 0.629089
|
4a089f104450135f4882faa100b56335557fddae
| 443
|
py
|
Python
|
done/2.py
|
eNV25/euler
|
6fbbb057f9051c0970a0c2d59458167b909f4ada
|
[
"CC0-1.0"
] | null | null | null |
done/2.py
|
eNV25/euler
|
6fbbb057f9051c0970a0c2d59458167b909f4ada
|
[
"CC0-1.0"
] | null | null | null |
done/2.py
|
eNV25/euler
|
6fbbb057f9051c0970a0c2d59458167b909f4ada
|
[
"CC0-1.0"
] | null | null | null |
import sys
def fibonacci(maxn):
    """Return Fibonacci numbers [1, 2, 3, 5, ...].

    The series always starts [1, 2] and always grows by at least one term;
    it stops as soon as the newest term reaches or exceeds int(maxn), which
    is therefore included in the result.
    """
    limit = int(maxn)
    series = [1, 2]
    while True:
        series.append(series[-2] + series[-1])
        if series[-1] >= limit:
            return series
# Project Euler #2: sum the even Fibonacci terms up to the requested bound.
# NOTE(review): on Python 3 input() yields a str, which fibonacci() converts
# with int(); on Python 2 input() would evaluate the typed expression.
fibo = fibonacci(input("maxn: "))
evenfibo = []
for i in fibo:
    # i % 2 == 0 means even.
    if not i % 2:
        evenfibo.append(i)
print(sum(evenfibo))
"""
in 4000000
out 4613732
"""
| 15.275862
| 33
| 0.485327
|
4a089f2f94f38edc6104d3dbe947c9f0e76fdf8c
| 3,444
|
py
|
Python
|
tools/create_release_notes.py
|
bjacobs1/vunit
|
a7f7717a172855ea7852296bb768370d50cfc992
|
[
"Artistic-2.0"
] | 1
|
2020-08-30T08:30:02.000Z
|
2020-08-30T08:30:02.000Z
|
tools/create_release_notes.py
|
smgl9/vunit
|
9933d9a1ae600cc241894244361282dd7f7227d7
|
[
"Artistic-2.0"
] | null | null | null |
tools/create_release_notes.py
|
smgl9/vunit
|
9933d9a1ae600cc241894244361282dd7f7227d7
|
[
"Artistic-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, Lars Asplund lars.anders.asplund@gmail.com
"""
Create monolithic release notes file from several input files
"""
from __future__ import print_function
from os.path import join, dirname, basename, splitext, relpath
from glob import glob
from subprocess import check_output, CalledProcessError
import datetime
def get_releases(source_path):
    """Build Release objects, newest first, from <source_path>/release_notes/*.rst.

    The file that sorts highest (reverse lexicographic order) is flagged as
    the latest release.
    """
    release_notes = join(source_path, "release_notes")
    note_files = sorted(glob(join(release_notes, "*.rst")), reverse=True)
    return [Release(file_name, is_latest=(idx == 0))
            for idx, file_name in enumerate(note_files)]
def create_release_notes():
    """
    Create monolithic release notes file from several input files

    Writes docs/release_notes.rst, containing a header that links to the
    commits since the latest release, followed by one titled section per
    release (newest first) that includes the per-release notes file.
    """
    source_path = join(dirname(__file__), "..", "docs")
    releases = get_releases(source_path)
    # NOTE(review): raises IndexError if no release-notes files exist.
    latest_release = releases[0]

    def banner(fptr):
        # Horizontal rule between sections.
        fptr.write("\n" + ("-" * 80) + "\n\n")

    with open(join(source_path, "release_notes.rst"), "w") as fptr:
        fptr.write("""
.. _release_notes:

Release notes
=============

For installation instructions read :ref:`this <installing>`.

`Commits since last release <https://github.com/VUnit/vunit/compare/%s...master>`__

""" % latest_release.tag)
        banner(fptr)

        for idx, release in enumerate(releases):
            is_last = idx == len(releases) - 1

            if release.is_latest:
                # Anchor so docs can link to the latest release section.
                fptr.write(".. _latest_release:\n\n")

            title = ":vunit_commit:`%s <%s>` - %s" % (release.name, release.tag, release.date.strftime("%Y-%m-%d"))
            if release.is_latest:
                title += " (latest)"
            fptr.write(title + "\n")
            fptr.write("-" * len(title) + "\n\n")
            fptr.write(".. include:: %s\n" % relpath(release.file_name, source_path))
            fptr.write("\n`Download from PyPI <https://pypi.python.org/pypi/vunit_hdl/%s/>`__\n"
                       % release.name)
            if not is_last:
                # Diff link against the previous (older) release.
                fptr.write("\n`Commits since previous release <https://github.com/VUnit/vunit/compare/%s...%s>`__\n"
                           % (releases[idx + 1].tag, release.tag))
            banner(fptr)
class Release(object):
    """A single release, backed by one release-notes file and a git tag."""

    def __init__(self, file_name, is_latest):
        self.file_name = file_name
        self.name = splitext(basename(file_name))[0]
        self.tag = "v" + self.name
        self.is_latest = is_latest

        try:
            self.date = _get_date(self.tag)
        except CalledProcessError:
            # An already-tagged (non-latest) release must have a tag; any
            # failure there is a real error.
            if not self.is_latest:
                raise
            # The tag for the upcoming release may not exist yet; fall back
            # to the date of HEAD.
            print("Release tag %s not created yet, use HEAD for date" % self.tag)
            self.date = _get_date("HEAD")

        with open(file_name, "r") as fptr:
            self.notes = fptr.read()
def _get_date(commit):
    """Return the committer date of *commit* as a naive datetime.

    Uses ``git log --format=%ci`` ("2018-03-01 12:34:56 +0100") and drops
    the timezone field.
    """
    raw = check_output(["git", "log", "-1", "--format=%ci", commit])
    fields = raw.decode().strip().split(" ")
    stamp = " ".join(fields[0:2])
    return datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
| 31.59633
| 116
| 0.605981
|
4a089f44cf969ed0033c18e8e027d050aa69c9b4
| 5,516
|
py
|
Python
|
image_folders.py
|
billallen256/photography
|
74233e19b3e776767d3d25353dec511775c32bff
|
[
"MIT"
] | null | null | null |
image_folders.py
|
billallen256/photography
|
74233e19b3e776767d3d25353dec511775c32bff
|
[
"MIT"
] | null | null | null |
image_folders.py
|
billallen256/photography
|
74233e19b3e776767d3d25353dec511775c32bff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# vim: expandtab tabstop=4 shiftwidth=4
from argparse import ArgumentParser
from datetime import datetime
import exifread
import logging
import os
import shutil
import sys
logging.basicConfig(level=logging.INFO)
min_datetime = datetime(2015, 1, 1)
def determine_capture_time(basename, extensions):
    """Return the earliest plausible capture time across basename+ext files.

    Dates that are None or not after the module-level min_datetime sanity
    bound are discarded; returns None when nothing plausible remains.
    """
    candidates = [
        dt
        for dt in (get_date(basename + ext) for ext in extensions)
        if dt is not None and dt > min_datetime
    ]
    if not candidates:
        return None
    # min() of a single candidate returns that candidate, so no special case.
    return min(candidates)
def get_date(file_path):
    """Prefer the EXIF timestamp; fall back to the file's mtime."""
    exif_date = get_exif_date(file_path)
    if exif_date is None:
        return datetime.fromtimestamp(os.path.getmtime(file_path))
    return exif_date
def get_exif_date(file_path):
    """Return the EXIF 'Image DateTime' of the file at file_path, or None.

    Any read/parse problem is logged and reported as None rather than raised.
    """
    time_field = 'Image DateTime'
    with open(file_path, 'rb') as image_file:
        try:
            tags = exifread.process_file(image_file, details=False, stop_tag=time_field)
        except Exception as err:
            logging.error(str(err))
            return None
        if time_field not in tags:
            return None
        try:
            return datetime.strptime(tags[time_field].values, '%Y:%m:%d %H:%M:%S')
        except Exception as err:
            logging.error(str(err))
            return None
def determine_output_dir(output_dir, dt, default_event):
    """Return output_dir/<YYYY.MM.DD[.event]> for capture time dt."""
    event = default_event.strip()
    name_parts = [dt.strftime('%Y.%m.%d')]
    if event:
        name_parts.append(event)
    return output_dir + os.sep + '.'.join(name_parts)
def make_output_dir(full_path, pretend=False):
    """Create full_path (mode 0755) unless it already exists.

    With pretend=True only the log message is emitted.
    """
    if os.path.exists(full_path):
        return
    logging.info('Making directory {0}'.format(full_path))
    if pretend:
        return
    os.mkdir(full_path, mode=0o755)
def make_name(prefix, dt):
    """Return the stripped prefix followed by dt as YYYYMMDDhhmmss."""
    timestamp = dt.strftime('%Y%m%d%H%M%S')
    return prefix.strip() + timestamp
def copy_file(from_path, to_path, pretend=False):
    """Copy from_path to to_path preserving metadata (shutil.copy2).

    With pretend=True only the log message is emitted.
    """
    logging.info('Copying {0} to {1}'.format(from_path, to_path))
    if pretend:
        return
    shutil.copy2(from_path, to_path)
def group_files(files):
    """Group file paths by their extension-less base path.

    Returns a dict mapping each base path to the set of extensions seen for
    it; files with no extension still appear, mapped to an empty set.
    """
    groups = {}
    for file_path in files:
        base, ext = os.path.splitext(file_path)
        # setdefault registers the base even for extension-less files.
        extensions = groups.setdefault(base, set())
        if ext:
            extensions.add(ext)
    return groups
def transpose_dict(d):
    """Invert d: each distinct value maps to the set of keys that had it."""
    transposed = {}
    for key, value in d.items():
        transposed.setdefault(value, set()).add(key)
    return transposed
def generate_move_ops(output_paths, file_groups):
    """Yield (from_path, to_path) copy operations.

    Basenames that share an output path (same capture second) are
    disambiguated with a suffix letter. Fix over the original: the suffix is
    now advanced once per *basename*, not per extension, so companion files
    of one shot (e.g. .jpg + .raw) keep the same base name.
    """
    for output_path, basenames in output_paths.items():
        seq = ' ABCDEFGHIJKLMNOPQRSTUVWXYZ' #TODO make this a generator to handle infinite conflicts
        # sort the basenames to preserve sequencing of files captured in the same second
        for seq_counter, basename in enumerate(sorted(basenames)):
            suffix = seq[seq_counter]
            for ext in file_groups[basename]:
                from_path = basename + ext
                # strip() removes the blank suffix used for the first basename.
                to_path = (output_path + suffix).strip() + ext
                yield (from_path, to_path)
def setup_argparser():
    """Build the command-line parser and parse sys.argv into a namespace."""
    arg_parser = ArgumentParser(description='Imports a directory of photos into dated directories with dated filenames.')
    arg_parser.set_defaults(pretend=False)
    arg_parser.add_argument('--input_dir', required=True, help='Directory to read files from (non-recursive)')
    arg_parser.add_argument('--output_dir', required=True, help='Directory to place dated directories and files')
    arg_parser.add_argument('--prefix', default='', required=False, help='Prefix that will be placed onto the name of each file, such as photographer initials')
    arg_parser.add_argument('--default_event', default='', required=False, help='Default event name to place at the end of each dated directory name')
    arg_parser.add_argument('--pretend', dest='pretend', action='store_true', help="Don't actually execute copy commands, just list them out")
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = setup_argparser()
    input_directory = args.input_dir
    output_directory = args.output_dir

    # NOTE(review): this only logs an error and then continues; it does not
    # abort when input and output directories are the same — confirm intent.
    if input_directory == output_directory:
        logging.error('Input directory cannot be the same as the output directory')

    # Lazily build the list of regular files directly inside input_directory.
    files = os.listdir(input_directory)
    files = ( input_directory + os.sep + f for f in files )
    files = ( f for f in files if os.path.isfile(f) )
    # Group companion files (same base name, different extensions) together.
    file_groups = group_files(files)
    capture_times = { basename: determine_capture_time(basename, extensions) for basename, extensions in file_groups.items() }
    # Drop groups for which no plausible capture time could be determined.
    file_groups = { basename: extensions for basename, extensions in file_groups.items() if capture_times[basename] is not None }
    output_dirs = { basename: determine_output_dir(output_directory, capture_times[basename], args.default_event) for basename in file_groups }

    # need to ensure the new filenames containing the capture time don't conflict
    # within their new output directories
    output_paths = { basename: output_dirs[basename]+os.sep+make_name(args.prefix, capture_times[basename]) for basename in file_groups }
    output_paths = transpose_dict(output_paths) # transpose so we can generate the move operations as a reduce

    for d in set(output_dirs.values()):
        make_output_dir(d, pretend=args.pretend)

    for from_path, to_path in generate_move_ops(output_paths, file_groups):
        copy_file(from_path, to_path, pretend=args.pretend)
| 35.133758
| 156
| 0.686911
|
4a089fc0a85c939ebddb7e374477079d89de3c4b
| 3,881
|
py
|
Python
|
x_tef2/bulk_plot_compare.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 4
|
2015-06-09T18:53:11.000Z
|
2021-08-19T01:39:38.000Z
|
x_tef2/bulk_plot_compare.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | null | null | null |
x_tef2/bulk_plot_compare.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 1
|
2017-03-07T01:28:49.000Z
|
2017-03-07T01:28:49.000Z
|
"""
Plot bulk fluxes as a time series. Meant to focus on side-by-side
comparisons of two variations (e.g. one with smaller tides)
"""
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zfun
Ldir = Lfun.Lstart()
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pandas as pd
from datetime import datetime, timedelta
import netCDF4 as nc
import tef_fun
import flux_fun
from importlib import reload
reload(flux_fun)
# get the DataFrame of all sections
sect_df = tef_fun.get_sect_df()

# Section to plot; the two input dirs below are the runs being compared.
sect_name = 'ai2'
testing = True  # NOTE(review): appears unused in this script.

indir0 = Ldir['LOo'] + 'tef2/'
indir_a = indir0 + 'cas6_v3_lo8b_2018.01.01_2018.12.31/'
indir_b = indir0 + 'cas6_v3t075_lo8_2018.01.01_2018.12.31/'

outdir = indir0 + 'bulk_plots_compare/'
Lfun.make_dir(outdir)

# PLOTTING
# Shared style constants (line width, font size, marker size, etc.).
lw=2
fs=16
ms = 20
alpha = .2
qscl = 50
plt.rc('font', size=fs)
plt.close('all')

fig = plt.figure(figsize=(18,11))

# ii selects the subplot column: 1 = run A, 2 = run B.
ii = 1
for indir in [indir_a, indir_b]:
    # ---------------------------------------------------------
    # Load TEF fluxes for this run and convert transports to 10^3 m^3/s.
    tef_df, in_sign = flux_fun.get_fluxes(indir, sect_name)
    tef_df['Qout'] = -tef_df['Qout']/1000
    tef_df['Qin'] = tef_df['Qin']/1000
    tef_df['DS'] = tef_df['Sin'] - tef_df['Sout']
    tef_df['Qtide'] = tef_df['Qtide']/1000

    # some information about direction
    # A section is either north-south (x0==x1) or east-west (y0==y1);
    # in_sign gives the sign convention for "inward" transport.
    x0, x1, y0, y1 = sect_df.loc[sect_name,:]
    if (x0==x1) and (y0!=y1):
        sdir = 'NS'
        if in_sign == 1:
            dir_str = 'Eastward'
        elif in_sign == -1:
            dir_str = 'Westward'
        a = [y0, y1]; a.sort()
        y0 = a[0]; y1 = a[1]
    elif (x0!=x1) and (y0==y1):
        sdir = 'EW'
        if in_sign == 1:
            dir_str = 'Northward'
        elif in_sign == -1:
            dir_str = 'Southward'
        a = [x0, x1]; a.sort()
        x0 = a[0]; x1 = a[1]
    # ---------------------------------------------------------

    # Plot window: calendar year 2018.
    dt0 = datetime(2018,1,1)
    dt1 = datetime(2018,12,31)

    # Salinity vs. Time
    ax = fig.add_subplot(3,2,ii)
    tef_df['Sin'].plot(c='r', lw=lw, ax=ax)
    tef_df['Sout'].plot(c='b', lw=lw, ax=ax)
    ax.set_xlim(dt0,dt1)
    ax.set_ylim(29,33)
    ax.set_title(indir.split('/')[-2])
    ax.grid(True)
    ax.set_xticklabels([])
    ax.set_xlabel('')
    ax.set_ylabel('Salinity')
    ax.text(.03, .95, '(a) Section = ' + sect_name, va='top', weight='bold', transform=ax.transAxes, size=1.2*fs,
        bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))
    ax.text(.97, .95, '$S_{in}$', ha='right', va='top', weight='bold', color='r',
        transform=ax.transAxes, size=1.2*fs,
        bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))
    ax.text(.97, .05, '$S_{out}$', ha='right', va='bottom', weight='bold', color='b',
        transform=ax.transAxes, size=1.2*fs,
        bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))

    # Tidal Transport vs. Time
    ax = fig.add_subplot(3,2,ii+2)
    tef_df['Qtide'].plot(c='k', lw=lw, ax=ax)
    ax.set_xlim(dt0,dt1)
    ax.set_ylim(0,350)
    ax.grid(True)
    ax.set_xticklabels([])
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.text(.03, .95, '(b) $Q_{tide}\ [10^{3}m^{3}s^{-1}]$',
        va='top', weight='bold', transform=ax.transAxes, size=1.2*fs,
        bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))

    # Tranport vs. Time
    ax = fig.add_subplot(3,2,ii+4)
    tef_df['Qin'].plot(c='r', lw=lw, ax=ax)
    tef_df['Qout'].plot(c='b', lw=lw, ax=ax)
    ax.set_xlim(dt0,dt1)
    ax.set_ylim(0,70)
    ax.grid(True)
    ax.set_xlabel('Date ' + str(dt0.year))
    ax.set_ylabel('')
    ax.text(.03, .95, '(c) $Q_{in}\ Q_{out}\ [10^{3}m^{3}s^{-1}]$',
        va='top', weight='bold', transform=ax.transAxes, size=1.2*fs,
        bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))

    ii += 1

fig.tight_layout()
plt.savefig(outdir + sect_name + '.png')
plt.show()
plt.rcdefaults()
| 28.328467
| 113
| 0.572533
|
4a08a015f8b283537b22496faefba7a0f5b89666
| 924
|
py
|
Python
|
archive/get_d3_json.py
|
ehrenb/Mercator
|
f87c6aa38d304a62d86a9e4b3a7d96d1f7aba156
|
[
"MIT"
] | 3
|
2018-04-17T18:11:47.000Z
|
2020-05-18T02:15:04.000Z
|
archive/get_d3_json.py
|
ehrenb/Mercator
|
f87c6aa38d304a62d86a9e4b3a7d96d1f7aba156
|
[
"MIT"
] | 3
|
2017-09-25T04:50:42.000Z
|
2017-09-27T00:37:03.000Z
|
archive/get_d3_json.py
|
ehrenb/Mercator
|
f87c6aa38d304a62d86a9e4b3a7d96d1f7aba156
|
[
"MIT"
] | null | null | null |
import json
import os
from flask import render_template, request
from Mercator import app, analysis_dir, graph_types
@app.route('/get_d3_json/<string:md5>')
def get_d3_json(md5):
    """Serve the pre-computed d3 graph JSON for the analysis named by md5.

    The graph type is selected with the required 'type' GET parameter and
    must be one of the configured graph_types. Fix over the original: the
    error paths previously returned None, which makes Flask raise a 500;
    they now return explicit 4xx responses.
    """
    graph_type = request.args.get('type', None)
    if not graph_type:
        error = "missing 'type' GET parameter"
        app.logger.error(error)
        return error, 400
    if graph_type not in graph_types:
        error = "'type' value must one of the following: {graph_types}".format(graph_types=graph_types)
        app.logger.error(error)
        return error, 400

    md5_analysis_dir = os.path.join(analysis_dir, md5)
    json_path = os.path.join(md5_analysis_dir, md5) + '_{type}.json'.format(type=graph_type)
    try:
        with open(json_path, 'r') as f:
            return f.read()
    except IOError as e:
        app.logger.error(str(e))
        return "analysis not found", 404
| 31.862069
| 103
| 0.698052
|
4a08a022dd53983617a1e6ec1c8fcb20d054adf6
| 409
|
py
|
Python
|
jobmatchings/migrations/0004_matchinghistory_isopentopublic.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | 1
|
2021-06-14T06:54:16.000Z
|
2021-06-14T06:54:16.000Z
|
jobmatchings/migrations/0004_matchinghistory_isopentopublic.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | 34
|
2020-04-05T01:14:31.000Z
|
2022-03-12T00:23:02.000Z
|
jobmatchings/migrations/0004_matchinghistory_isopentopublic.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-03-17 18:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0 (2020-03-17): adds the boolean
    # MatchingHistory.isOpenToPublic flag, defaulting to False.

    dependencies = [
        ('jobmatchings', '0003_match_jobapplication'),
    ]

    operations = [
        migrations.AddField(
            model_name='matchinghistory',
            name='isOpenToPublic',
            field=models.BooleanField(default=False),
        ),
    ]
| 21.526316
| 54
| 0.623472
|
4a08a2b8f5bbc624a677623af786a2a60ce640f9
| 3,443
|
py
|
Python
|
src/train.py
|
lindsey98/dml_cross_entropy
|
4312cb295e972abda7b0e2bdadecf1965c5d7ed5
|
[
"BSD-3-Clause"
] | null | null | null |
src/train.py
|
lindsey98/dml_cross_entropy
|
4312cb295e972abda7b0e2bdadecf1965c5d7ed5
|
[
"BSD-3-Clause"
] | null | null | null |
src/train.py
|
lindsey98/dml_cross_entropy
|
4312cb295e972abda7b0e2bdadecf1965c5d7ed5
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import os
import tempfile
from copy import deepcopy
from functools import partial
from pprint import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.backends import cudnn
from torch.optim import SGD, lr_scheduler
from src.utils import state_dict_to_cpu, SmoothCrossEntropy
from torch.utils.data import DataLoader
from typing import NamedTuple, Optional, Dict, List, Any, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torchvision import transforms
from src.metrics import AverageMeter
from tqdm import tqdm
import logging
import yaml
import faiss
def get_optimizer_scheduler(cfg: Dict, parameters: Dict, loader_length: int) -> (Optimizer, _LRScheduler):
    '''
    Initialize the SGD optimizer and its per-batch learning-rate scheduler.

    Args:
        cfg: config mapping with a 'Train' section providing epochs, lr,
            momentum, nesterov, weight_decay, scheduler and lr_step.
        parameters: model parameters, passed straight to SGD.
        loader_length: batches per epoch; schedulers are stepped per batch.

    Returns:
        (optimizer, scheduler); scheduler is None when epochs == 0, and an
        unknown scheduler name falls back to a single-step StepLR
        (effectively a constant learning rate).
    '''
    train_cfg = cfg['Train']
    epochs = int(train_cfg['epochs'])
    lr = float(train_cfg['lr'])
    momentum = float(train_cfg['momentum'])
    nesterov = train_cfg['nesterov']
    weight_decay = float(train_cfg['weight_decay'])
    scheduler_name = train_cfg['scheduler']
    lr_step = train_cfg['lr_step']

    # Nesterov momentum is only valid when momentum is non-zero.
    optimizer = SGD(parameters, lr=lr, momentum=momentum, weight_decay=weight_decay,
                    nesterov=bool(nesterov and momentum))

    total_steps = epochs * loader_length
    if epochs == 0:
        scheduler = None
    elif scheduler_name == 'cos':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps, eta_min=0)
    elif scheduler_name == 'warmcos':
        # 100-step linear warm-up followed by cosine annealing.
        def warm_cosine(i):
            return min((i + 1) / 100, (1 + math.cos(math.pi * i / total_steps)) / 2)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=warm_cosine)
    elif scheduler_name == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, lr_step * loader_length)
    elif scheduler_name == 'warmstep':
        # 100-step linear warm-up, then decay by 10x every lr_step epochs.
        def warm_step(i):
            return min((i + 1) / 100, 1) * 0.1 ** (i // (lr_step * loader_length))
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=warm_step)
    else:
        # Unknown name: single LR step at the very end, i.e. constant LR.
        scheduler = lr_scheduler.StepLR(optimizer, total_steps)
    return optimizer, scheduler
def training(model: nn.Module, loader: DataLoader,
             labeldict: Dict, class_loss: nn.Module,
             optimizer: Optimizer, scheduler: Union[_LRScheduler, None],
             epoch: int,
             logger) -> nn.Module:
    '''
    Train model for one epoch

    labeldict maps raw dataset labels to contiguous class indices.
    The scheduler (if any) is stepped once per batch. Returns the model
    after logging the epoch's average training loss.
    '''
    model.train()
    # Run everything on whatever device the model already lives on.
    device = next(model.parameters()).device
    print('Device used: ', device)
    to_device = lambda x: x.to(device, non_blocking=True)

    loader_length = len(loader)
    train_losses = AverageMeter(device=device, length=loader_length)

    pbar = tqdm(loader, ncols=80, desc='Training [{:03d}]'.format(epoch))
    for i, (batch, labels, indices) in enumerate(pbar):
        # Remap raw labels through labeldict before moving to the device.
        labels = torch.tensor([labeldict[x] for x in labels.numpy()])
        batch, labels, indices = map(to_device, (batch, labels, indices))
        logits, features = model(batch)
        # loss = class_loss(logits, labels).mean() # Mutual information loss uses CE loss requires logits
        loss = class_loss(features, labels) # SoftTripleLoss does not look at logits

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Per-batch LR schedule (cos/warmcos/step/... from get_optimizer_scheduler).
        if scheduler is not None:
            scheduler.step()

        train_losses.append(loss)

    logger.info('Epoch {} train.loss = {}'.format(epoch, train_losses.last_avg))
    return model
| 35.864583
| 110
| 0.675864
|
4a08a328c44cecb196d263826a33a78375e6f6dc
| 329
|
py
|
Python
|
flexmeasures/data/models/charts/test_chart_defaults.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 37
|
2021-02-16T11:18:20.000Z
|
2021-11-04T22:04:56.000Z
|
flexmeasures/data/models/charts/test_chart_defaults.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 165
|
2021-02-16T15:27:20.000Z
|
2021-12-06T14:19:20.000Z
|
flexmeasures/data/models/charts/test_chart_defaults.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 5
|
2021-02-23T12:05:42.000Z
|
2021-11-04T13:58:40.000Z
|
import altair as alt
from flexmeasures.data.models.charts.defaults import FIELD_DEFINITIONS
def test_default_encodings():
    """Check default encodings for valid vega-lite specifications."""
    # Only the definitions matter here; the field name was never used, so
    # iterate .values() instead of .items().
    for field_definition in FIELD_DEFINITIONS.values():
        assert alt.StringFieldDefWithCondition(**field_definition)
| 32.9
| 70
| 0.793313
|
4a08a4014fd8cdf058029111df34910f2b16e00f
| 30,641
|
py
|
Python
|
Main.py
|
mikeen97/ProyectoBaseDatosII
|
785e333a69542a25af7c570b2a8d8ebba944a333
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
mikeen97/ProyectoBaseDatosII
|
785e333a69542a25af7c570b2a8d8ebba944a333
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
mikeen97/ProyectoBaseDatosII
|
785e333a69542a25af7c570b2a8d8ebba944a333
|
[
"Apache-2.0"
] | null | null | null |
import sys
import couchdb
import ctypes # An included library with Python install.
from PyQt5 import uic, QtWidgets
import json
import requests
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QTableWidget,QTableWidgetItem,QMessageBox,QPushButton
from PyQt5 import QtGui
# -------------------------------------------------------------------------------
qtCreatorFile = "GUI.ui"  # Name of the Qt Designer UI file goes here.

# Load the UI definition; Ui_MainWindow is mixed into MyApp below.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)

# Connection to the DATABASE (CouchDB on its default local port).
couch = couchdb.Server("http://127.0.0.1:5984")
db = couch['test2']

# Module-level working lists shared by the GUI handlers.
propietarios=[]
farmaceuticos=[]
productos=[]
productos_laboratorio=[]
productos_almacenes=[]
almacenes=[]
class MyApp(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
##############
##############AGREGAR
##############
# Agregar Farmaceutico
self.bt_Farmaceutico_agregar_2.clicked.connect(self.agregarFarmaceutico)
# Agregar Propietarios
#self.bt_propietario_agregar.clicked.connect(self.agregarPropietario)
# agregar Laboratorios
self.bt_Laboratorio_agregar.clicked.connect(self.agregarLaboratorio)
# Agregar Productos
self.bt_Producto_agregar.clicked.connect(self.agregarProductos)
# AgregarFarmacia
self.bt_Farmacia_Agregar.clicked.connect(self.agregarFarmacia)
################
################ ELIMINAR
################
# prueba eliminar
# eliminar Farmaceutico
self.bt_Farmaceutico_Eliminar_2.clicked.connect(self.AccionEliminarFarmaceutico)
# eliminar Propietario
#self.bt_propietario_Eliminar.clicked.connect(self.AccionEliminarPropietario)
# eliminar Productos
self.bt_Producto_Eliminar.clicked.connect(self.AccionEliminarProducto)
# eliminar Laboratorio
self.bt_Laboratorio_Eliminar.clicked.connect(self.AccionEliminarLaboratorio)
# eliminar Farmacias
self.bt_Farmacia_Eliminar.clicked.connect(self.AccionEliminarFarmacia)
################
################ ACTUALIZAR
################
# Actualizar Farmaceutico
self.bt_Farmaceutico_Modificar_2.clicked.connect(self.AccionModificarFarmaceutico)
# Actualizar Propietario
#self.bt_propietario_Modificar.clicked.connect(self.AccionModificarPropietario)
# Actualizar Laboratorio
self.bt_Laboratorio_Modificar.clicked.connect(self.AccionModificarLaboratorio)
# Actualizar Producto
self.bt_Producto_Modificar.clicked.connect(self.AccionModificarProducto)
#Actualizar Farmacia
self.bt_Farmacia_Modificar.clicked.connect(self.ModificarFarmacia)
################
################ BUSCAR
################
self.bt_FindFarmacia.clicked.connect(self.BuscarFarmacia)
self.bt_FindFarmaceutico_2.clicked.connect(self.BuscarFarmaceutico)
#self.bt_FindPropietario.clicked.connect(self.BuscarPropietaraio)
self.bt_FindLaboratorio.clicked.connect(self.BuscarLaboratorio)
self.bt_FindProducto.clicked.connect(self.BuscarProducto)
################
############### TAB BAR CLICKED
################
self.bt_Farmacia_BuscarProductos.clicked.connect(self.AccionBuscarProductos)
self.bt_load.clicked.connect(self.loadComboboxes)
self.bt_Farmacia_asigPropietarios.clicked.connect(self.asignarPropietario)
self.bt_Farmacia_asigFarmaceuticos.clicked.connect(self.asignarFarmaceuticos)
self.bt_agregarAlmacen.clicked.connect(self.agregarAlmacen)
self.bt_asignaralmancen.clicked.connect(self.asignarAlamacen)
self.bt_loadprodlab.clicked.connect(self.loadPro)
self.bt_Farmacia_BuscarProductos.clicked.connect(self.loadAlmacenesfar)
self.bt_almacen_farmacia.clicked.connect(self.loadProductosfar)
self.bt_load_2.clicked.connect(self.loadProRES)
self.bt_Laboratorio_AgregarProducto.clicked.connect(self.AgregarProductoALAb)
self.bt_Laboratorio_AgregarProducto_3.clicked.connect(self.SetproductosLaboratorio)
self.bt_Farmacia_BuscarProductos_3.clicked.connect(self.CargarTodosLabsConProductos)
self.bt_Farmacia_BuscarProductos_6.clicked.connect(self.AgregarProductos_Cola)
self.bt_Farmacia_BuscarProductos_7.clicked.connect(self.RealizarPedido)
def RealizarPedido(self):
    """Place the pending order held in the shared ``productos_almacenes`` queue.

    Each queued entry is ``[codigo_producto, cantidad, precio, codigo_lab]``.
    For every entry whose laboratory still stocks strictly more than the
    requested quantity, the laboratory document is rewritten with the
    decremented stock; the target almacen document is replaced with the full
    queue.  Finally the queue, the staging tables and the input fields are
    cleared.
    """
    # Target almacen id; its CouchDB document is replaced wholesale below.
    almacen=self.tf_Farmacia_CodFarmaciaAlmacenBuscar_3.text()
    docalma=db[almacen]
    db.delete(docalma)
    for e in productos_almacenes:
        for pro in db.get(e[3]).get('productos'):
            # Only debit when product code and price match and the lab holds
            # strictly more stock than ordered.  NOTE(review): '<' (not '<=')
            # means an order for the lab's exact stock is silently skipped --
            # confirm this is intended.
            if(e[0]==pro[0] and int(e[1])<int(pro[1]) and e[2]==pro[2]):
                print('Primer if')
                doc = db[e[3]]
                db.delete(doc)
                valor=int(pro[1])-int(e[1])  # remaining laboratory stock
                id = e[3]
                nombre = doc.get('nombre')
                temp=doc.get('productos')
                array=[]
                # Rebuild the lab's product list with the decremented count.
                # NOTE(review): the `valor>0` guard is applied to EVERY
                # product, so if the ordered item drops to zero the whole
                # list is emptied -- confirm whether only the matching item
                # should be dropped.
                for l in temp:
                    if(l[0]==pro[0]):
                        l[1]=valor.__str__()
                    if(valor>0):
                        array.append(l)
                doc = {
                    '_id': id,
                    'nombre': nombre,
                    'productos': array,
                    'tipo':'laboratorio'
                }
                db.save(doc)
    # Save the order itself as the new almacen document.
    doc1 = {
        '_id': almacen,
        'productos':productos_almacenes,
        'tipo':'almacenes'
    }
    db.save(doc1)
    # Reset queue, staging tables and input fields.
    productos_almacenes.clear()
    self.tableWidget_9.setRowCount(0)
    self.tableWidget_9.setColumnCount(0)
    self.tableWidget_9.setHorizontalHeaderItem(0, QTableWidgetItem(""))
    self.tableWidget_2.setRowCount(0)
    self.tableWidget_2.setColumnCount(0)
    self.tableWidget_2.setHorizontalHeaderItem(0, QTableWidgetItem(""))
    self.tf_Farmacia_CodFarmaciaAlmacenBuscar_4.setText("")
    self.tf_Farmacia_CodFarmaciaAlmacenBuscar_5.setText("")
    self.tf_Farmacia_CodFarmaciaAlmacenBuscar_6.setText("")
    self.tf_Farmacia_CodFarmaciaAlmacenBuscar_12.setText("")
def modificarProductosEnLaboratorio(idlab,idpro,cantidadNueva):
    # NOTE(review): placeholder only.  It is also missing the `self`
    # parameter, so calling it on an instance would bind the instance to
    # `idlab` -- confirm whether this stub is still needed.
    print('inicio')
def AgregarProductos_Cola(self):
    """Queue one product for the pending order and refresh the queue table."""
    lab = self.tf_Farmacia_CodFarmaciaAlmacenBuscar_4.text()
    producto = self.tf_Farmacia_CodFarmaciaAlmacenBuscar_5.text()
    precio = self.tf_Farmacia_CodFarmaciaAlmacenBuscar_6.text()
    cantidad = self.tf_Farmacia_CodFarmaciaAlmacenBuscar_12.text()
    productos_almacenes.append([producto, cantidad, precio, lab])
    tabla = self.tableWidget_9
    tabla.setRowCount(len(productos_almacenes))
    tabla.setColumnCount(4)
    for col, titulo in enumerate(("Nombre", "Codigo", "Precio", "Cantidad")):
        tabla.setHorizontalHeaderItem(col, QTableWidgetItem(titulo))
    # Re-render the whole queue; each entry is [codigo, cantidad, precio, lab].
    for fila, (cod, cant, pre, _lab) in enumerate(productos_almacenes):
        tabla.setItem(fila, 0, QtWidgets.QTableWidgetItem(db.get(cod).get('nombre')))
        tabla.setItem(fila, 1, QtWidgets.QTableWidgetItem(cod))
        tabla.setItem(fila, 2, QtWidgets.QTableWidgetItem(pre))
        tabla.setItem(fila, 3, QtWidgets.QTableWidgetItem(cant))
def CargarTodosLabsConProductos(self):
    """Fill tableWidget_2 with every laboratory and each of its products.

    Rows are grouped per laboratory; the extra ``i=i+1`` after the inner
    loop leaves one blank row between laboratories.
    """
    # Fetch all laboratory ids from the CouchDB list view.
    torequest=requests.get('http://127.0.0.1:5984/test2/_design/list/_view/laboratorios')
    doc=torequest.json().get('rows')
    print(doc)
    # NOTE(review): the row count is hard-coded to 1000; output beyond 1000
    # rows would be silently dropped -- confirm acceptable.
    self.tableWidget_2.setRowCount(1000)
    self.tableWidget_2.setColumnCount(5)
    self.tableWidget_2.setHorizontalHeaderItem(0, QTableWidgetItem("Laboratorio"))
    self.tableWidget_2.setHorizontalHeaderItem(1, QTableWidgetItem("Nombre"))
    self.tableWidget_2.setHorizontalHeaderItem(2, QTableWidgetItem("Codigo"))
    self.tableWidget_2.setHorizontalHeaderItem(3, QTableWidgetItem("Cantidad"))
    self.tableWidget_2.setHorizontalHeaderItem(4, QTableWidgetItem("Precio"))
    i=0
    for labs in doc:
        # Each product entry is [codigo, cantidad, precio].
        for pro in db.get(labs.get('key')).get('productos'):
            self.tableWidget_2.setItem(i,0, QtWidgets.QTableWidgetItem(labs.get('key')))
            self.tableWidget_2.setItem(i,1, QtWidgets.QTableWidgetItem(db.get(pro[0]).get('nombre')))
            self.tableWidget_2.setItem(i,2, QtWidgets.QTableWidgetItem(pro[0]))
            self.tableWidget_2.setItem(i,3, QtWidgets.QTableWidgetItem(pro[1]))
            self.tableWidget_2.setItem(i,4, QtWidgets.QTableWidgetItem(pro[2]))
            i=i+1
        # blank separator row between laboratories
        i=i+1
def ModificarFarmacia(self):
    """Update a pharmacy document from the form, preserving its linked lists.

    Bug fixes versus the original:
    * the freshly-built document dict was immediately overwritten by
      ``doc = db[cod]`` before saving, so the user's edits were discarded and
      the stale (already-deleted) document was re-saved;
    * the form was cleared with ``settext`` -- QLineEdit's method is
      ``setText``, so clearing raised AttributeError.
    """
    cod = self.tf_Farmacia_codigoFarm.text()
    ciudad = self.tf_Farmacia_Ciudad.text()
    departamento = self.tf_Farmacia_departamento.text()
    calle = self.tf_Farmacia_calle.text()
    # Read the collections we must preserve, then delete the old revision.
    old = db[cod]
    alma = old.get('almacenes')
    prop = old.get('propietarios')
    farma = old.get('farmaceuticos')
    db.delete(old)
    doc = {
        '_id': cod,
        "direccion": {"ciudad": ciudad,
                      "departamento": departamento,
                      "calle": calle
                      },
        "almacenes": alma,
        "propietarios": prop,
        "farmaceuticos": farma,
        "tipo": "farmacia"
    }
    db.save(doc)
    self.bt_load.setEnabled(True)
    self.tf_Farmacia_codigoFarm.setText('')
    self.tf_Farmacia_Ciudad.setText('')
    self.tf_Farmacia_departamento.setText('')
    self.tf_Farmacia_calle.setText('')
def AgregarProductoALAb(self):
    """Queue a product (code, quantity, sale price) for the laboratory being
    edited and refresh the staging table."""
    codigo = self.tf_Laboratorio_IdProduct.text()
    cantidad = self.tf_Laboratorio_cantidadProduct.text()
    precio_venta = self.tf_Laboratorio_costoVentaProducto.text()
    productos_laboratorio.append([codigo, cantidad, precio_venta])
    print (productos_laboratorio)
    tabla = self.tb_lab_agregarproducto_2
    tabla.setRowCount(len(productos_laboratorio))
    tabla.setColumnCount(5)
    titulos = ("Codigo", "Nombre", "Cantidad", "precio coste", "Precio Venta")
    for col, titulo in enumerate(titulos):
        tabla.setHorizontalHeaderItem(col, QTableWidgetItem(titulo))
    for fila, (cod, cant, venta) in enumerate(productos_laboratorio):
        producto = db.get(cod)
        tabla.setItem(fila, 0, QtWidgets.QTableWidgetItem(cod))
        tabla.setItem(fila, 1, QtWidgets.QTableWidgetItem(producto.get('nombre')))
        tabla.setItem(fila, 2, QtWidgets.QTableWidgetItem(cant))
        tabla.setItem(fila, 3, QtWidgets.QTableWidgetItem(producto.get('precioCosto')))
        tabla.setItem(fila, 4, QtWidgets.QTableWidgetItem(venta))
    self.tf_Laboratorio_IdProduct.setText('')
    self.tf_Laboratorio_cantidadProduct.setText('')
    self.tf_Laboratorio_costoVentaProducto.setText('')
def SetproductosLaboratorio(self):
    """Persist the queued ``productos_laboratorio`` into a laboratory document.

    The existing document is deleted and re-saved with its previous products
    merged with the queued ones; then the staging tables, form fields and the
    shared queue are reset.
    """
    doc = db[self.tf_Laboratorio_IdProduct_3.text()]
    db.delete(doc)
    id = self.tf_Laboratorio_IdProduct_3.text()
    nombre = doc.get('nombre')
    # Keep the products the lab already had; the queue holds the new ones.
    temp=doc.get('productos')
    for e in temp:
        productos_laboratorio.append(e)
    doc = {
        '_id': id,
        'nombre': nombre,
        'productos': productos_laboratorio,
        'tipo':'laboratorio'
    }
    db.save(doc)
    # Reset form fields, staging tables and the shared queue.
    self.tf_Laboratorio_id.setText("")
    self.tf_Laboratorio_nombre.setText("")
    self.tb_lab_agregarproducto_2.setRowCount(0)
    self.tb_lab_agregarproducto_2.setColumnCount(0)
    self.tb_lab_agregarproducto_2.setHorizontalHeaderItem(0, QTableWidgetItem(""))
    self.tb_lab_agregarproducto.setRowCount(0)
    self.tb_lab_agregarproducto.setColumnCount(0)
    self.tb_lab_agregarproducto.setHorizontalHeaderItem(0, QTableWidgetItem(""))
    productos_laboratorio.clear()
def loadProductosfar(self):
    """Show the products of the almacen whose code is in ``tf_alamacenfarma``.

    Fix: four columns are populated (codigo, nombre, cantidad, precio venta)
    but the table was sized with ``setColumnCount(3)``, so the fourth column
    and its header were silently dropped.
    """
    doc = db.get(self.tf_alamacenfarma.text())
    array = doc.get('productos')
    print(array)
    self.tableWidget.setRowCount(len(array))
    self.tableWidget.setColumnCount(4)  # was 3: hid the "Precio Venta" column
    self.tableWidget.setHorizontalHeaderItem(0, QTableWidgetItem("Codigo"))
    self.tableWidget.setHorizontalHeaderItem(1, QTableWidgetItem("Nombre"))
    self.tableWidget.setHorizontalHeaderItem(2, QTableWidgetItem("Cantidad"))
    self.tableWidget.setHorizontalHeaderItem(3, QTableWidgetItem("Precio Venta"))
    for i, e in enumerate(array):
        # e = [codigo, cantidad, precio_venta]
        self.tableWidget.setItem(i, 0, QtWidgets.QTableWidgetItem(e[0]))
        self.tableWidget.setItem(i, 1, QtWidgets.QTableWidgetItem(db.get(e[0]).get('nombre')))
        self.tableWidget.setItem(i, 2, QtWidgets.QTableWidgetItem(e[1]))
        self.tableWidget.setItem(i, 3, QtWidgets.QTableWidgetItem(e[2]))
def loadAlmacenesfar(self):
    """List the almacen codes attached to the pharmacy given in the search box.

    Fix: the original iterated ``for e in doc`` -- i.e. over the KEYS of the
    pharmacy document -- while indexing ``array[i]``, so the table was
    truncated whenever the document had fewer keys than there were almacenes.
    Iterate the almacen list itself.
    """
    doc = db.get(self.tf_Farmacia_CodFarmaciaAlmacenBuscar.text())
    array = doc.get('almacenes')
    print (array)
    self.tb_almacenesfarma.setRowCount(len(array))
    self.tb_almacenesfarma.setColumnCount(3)
    self.tb_almacenesfarma.setHorizontalHeaderItem(0, QTableWidgetItem("Codigo"))
    for i, codigo in enumerate(array):
        self.tb_almacenesfarma.setItem(i, 0, QtWidgets.QTableWidgetItem(codigo))
def loadPro(self):
    """Fill the product-picker table from the CouchDB 'productos' view."""
    respuesta = requests.get('http://127.0.0.1:5984/test2/_design/list/_view/productos')
    filas = respuesta.json().get('rows')
    tabla = self.tb_lab_agregarproducto
    tabla.setRowCount(len(filas))
    tabla.setColumnCount(2)
    tabla.setHorizontalHeaderItem(0, QTableWidgetItem("Codigo"))
    tabla.setHorizontalHeaderItem(1, QTableWidgetItem("Nombre"))
    for i, fila in enumerate(filas):
        codigo = fila.get('key')
        tabla.setItem(i, 0, QtWidgets.QTableWidgetItem(codigo))
        tabla.setItem(i, 1, QtWidgets.QTableWidgetItem(db.get(codigo).get('nombre')))
def agregarAlmacen(self):
    """Create an empty almacen document with the code typed in the form."""
    doc = {
        '_id': self.tf_codigo_almacenes.text(),
        'productos': [],
        'tipo': 'almacenes'
    }
    db.save(doc)
    self.tf_codigo_almacenes.setText("")
    aviso = QMessageBox()
    aviso.setText("Se agrego el almacen exitosamente!.")
    aviso.exec_()
def asignarAlamacen(self):
    """Move the selected almacen from the combo box into the pending
    ``almacenes`` list for the pharmacy being created."""
    combo = self.cb_almaneces
    almacenes.append(combo.currentText())
    combo.removeItem(combo.currentIndex())
    # Lock the reload button until the pharmacy is saved.
    self.bt_load.setEnabled(False)
    print (almacenes)
    aviso = QMessageBox()
    aviso.setText("Se asigno el almacen exitosamente!.")
    aviso.exec_()
def asignarPropietario(self):
    """Move the selected propietario from the combo box into the pending
    ``propietarios`` list for the pharmacy being created."""
    combo = self.cb_Farmacia_Propietarios
    propietarios.append(combo.currentText())
    combo.removeItem(combo.currentIndex())
    # Lock the reload button until the pharmacy is saved.
    self.bt_load.setEnabled(False)
    aviso = QMessageBox()
    aviso.setText("Se asigno el propietario exitosamente!.")
    aviso.exec_()
def asignarFarmaceuticos(self):
    """Move the selected farmaceutico from the combo box into the pending
    ``farmaceuticos`` list for the pharmacy being created."""
    combo = self.cb_Farmacia_farmaceuticos
    farmaceuticos.append(combo.currentText())
    combo.removeItem(combo.currentIndex())
    # Lock the reload button until the pharmacy is saved.
    self.bt_load.setEnabled(False)
    aviso = QMessageBox()
    aviso.setText("Se asigno el farmaceutico exitosamente!.")
    aviso.exec_()
def AccionBuscarProductos(self):
    """Fetch the product list view.

    NOTE(review): the response is parsed into ``doc`` and then discarded --
    this handler currently has no visible effect; confirm whether it is
    unfinished work.
    """
    torequest=requests.get('http://127.0.0.1:5984/test2/_design/List_productos/_view/view_list_productos')
    doc=torequest.json().get('rows')
def AccionModificarProducto(self):
    """Replace the product document with the values currently in the form."""
    ident = self.tf_Producto_id.text()
    # CouchDB update pattern used throughout this file: delete + re-save.
    db.delete(db[ident])
    nuevo = {
        '_id': ident,
        'nombre': self.tf_Producto_nombre.text(),
        'fabricante': self.tf_Producto_Fabricante.text(),
        'precioCosto': self.tf_Producto_precioCoste.text(),
        'precioVenta': '',
        'cantidad': '',
        'seguro': self.cb_Producto_TieneProteccion.currentText(),
        'categoria': self.tf_Producto_Categoria.text(),
        'descripcion': self.tf_Producto_DescripcionProducto.toPlainText(),
        'tipo': "producto"
    }
    print(nuevo)
    db.save(nuevo)
    for campo in (self.tf_Producto_id, self.tf_Producto_nombre,
                  self.tf_Producto_Fabricante, self.tf_Producto_precioCoste,
                  self.tf_Producto_Categoria):
        campo.setText("")
    self.tf_Producto_DescripcionProducto.setPlainText("")
    aviso = QMessageBox()
    aviso.setText("Se modifico el producto exitosamente!.")
    aviso.exec_()
def AccionModificarLaboratorio(self):
    """Re-save a laboratory document with the name from the form.

    The old document is deleted and re-created; its previous products are
    appended to the shared ``productos_laboratorio`` queue and saved back.
    NOTE(review): the queue is NOT cleared afterwards, so leftover entries
    from earlier edits would leak into this save -- confirm intended.
    """
    doc = db[self.tf_Laboratorio_id.text()]
    db.delete(doc)
    id = self.tf_Laboratorio_id.text()
    nombre = self.tf_Laboratorio_nombre.text()
    # Preserve the products the lab already had.
    temp=doc.get('productos')
    for e in temp:
        productos_laboratorio.append(e)
    doc = {
        '_id': id,
        'nombre': nombre,
        'productos':productos_laboratorio,
        'tipo':'laboratorio'
    }
    db.save(doc)
    self.tf_Laboratorio_id.setText("")
    self.tf_Laboratorio_nombre.setText("")
    msgBox = QMessageBox()
    msgBox.setText("Se modifico el laboratorio exitosamente!.")
    msgBox.exec_()
def AccionModificarFarmaceutico(self):
    """Replace a farmaceutico document with the values in the form fields.

    Cleanup: removed the unused local ``persona = ("Farmaceutico")`` -- the
    saved document uses the literal ``'persona'`` type tag, matching the
    other person-document writers in this class.
    """
    ident = self.tf_Farmaceutico_id_2.text()
    # CouchDB update pattern used throughout this file: delete + re-save.
    db.delete(db[ident])
    doc = {
        '_id': ident,
        'nombre': self.tf_Farmaceutico_nombre_2.text(),
        'direccion': self.tf_Farmaceutico_direccion_2.text(),
        'edad': self.tf_Farmaceutico_edad_2.text(),
        'tipo': 'persona'
    }
    db.save(doc)
    for campo in (self.tf_Farmaceutico_id_2, self.tf_Farmaceutico_nombre_2,
                  self.tf_Farmaceutico_direccion_2, self.tf_Farmaceutico_edad_2):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se modifico el farmaceutico exitosamente!.")
    aviso.exec_()
def loadProRES(self):
    """Show the propietarios and farmaceuticos assigned to a pharmacy."""
    # Both tables share the same layout; only the document key and the
    # second header text differ (the original farmaceutico header has a
    # trailing space, preserved here).
    vistas = ((self.tableWidget_3, 'propietarios', "Nombre"),
              (self.tableWidget_4, 'farmaceuticos', "Nombre "))
    for tabla, clave, titulo in vistas:
        doc = db.get(self.codigo_farmacia.text())
        ids = doc.get(clave)
        tabla.setRowCount(len(ids))
        tabla.setColumnCount(2)
        tabla.setHorizontalHeaderItem(0, QTableWidgetItem("ID"))
        tabla.setHorizontalHeaderItem(1, QTableWidgetItem(titulo))
        for fila, ident in enumerate(ids):
            tabla.setItem(fila, 0, QtWidgets.QTableWidgetItem(ident))
            tabla.setItem(fila, 1, QtWidgets.QTableWidgetItem(db.get(ident).get('nombre')))
def BuscarProducto(self):
    """Load a product document into the product form fields."""
    doc = db.get(self.tf_Producto_id.text())
    campos = ((self.tf_Producto_nombre, 'nombre'),
              (self.tf_Producto_Fabricante, 'fabricante'),
              (self.tf_Producto_precioCoste, 'precioCosto'),
              (self.tf_Producto_Categoria, 'categoria'))
    for widget, clave in campos:
        widget.setText(doc.get(clave))
    self.tf_Producto_DescripcionProducto.setPlainText(doc.get('descripcion'))
def BuscarLaboratorio(self):
    """Load the laboratory name for the id typed in the form."""
    laboratorio = db.get(self.tf_Laboratorio_id.text())
    self.tf_Laboratorio_nombre.setText(laboratorio.get('nombre'))
def BuscarPropietaraio(self):
    """Load a propietario document into the propietario form fields."""
    doc = db.get(self.tf_propietario_id.text())
    print(doc)
    for widget, clave in ((self.tf_propietario_nombre, 'nombre'),
                          (self.tf_propietario_direccion, 'direccion'),
                          (self.tf_propietario_edad, 'edad')):
        widget.setText(doc.get(clave))
def BuscarFarmaceutico(self):
    """Load a farmaceutico document into the farmaceutico form fields."""
    doc = db.get(self.tf_Farmaceutico_id_2.text())
    print (doc)
    for widget, clave in ((self.tf_Farmaceutico_nombre_2, 'nombre'),
                          (self.tf_Farmaceutico_direccion_2, 'direccion'),
                          (self.tf_Farmaceutico_edad_2, 'edad')):
        widget.setText(doc.get(clave))
def BuscarFarmacia(self):
    """Load a pharmacy's address into the pharmacy form fields."""
    doc = db.get(self.tf_Farmacia_codigoFarm.text())
    print (doc)
    direccion = doc.get('direccion')
    self.tf_Farmacia_Ciudad.setText(direccion.get('ciudad'))
    self.tf_Farmacia_departamento.setText(direccion.get('departamento'))
    self.tf_Farmacia_calle.setText(direccion.get('calle'))
def AccionModificarPropietario(self):
    """Replace a propietario document with the values in the form fields."""
    ident = self.tf_propietario_id.text()
    # CouchDB update pattern used throughout this file: delete + re-save.
    db.delete(db[ident])
    persona = ("Propietario")
    doc = {
        '_id': ident,
        'nombre': self.tf_propietario_nombre.text(),
        'direccion': self.tf_propietario_direccion.text(),
        'edad': self.tf_propietario_edad.text(),
        'tipo': persona
    }
    db.save(doc)
    for campo in (self.tf_propietario_id, self.tf_propietario_nombre,
                  self.tf_propietario_direccion, self.tf_propietario_edad):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se modifico el propietario exitosamente!.")
    aviso.exec_()
def AccionEliminarFarmaceutico(self):
    """Delete the farmaceutico whose id is in the form, then clear the form."""
    db.delete(db[self.tf_Farmaceutico_id_2.text()])
    for campo in (self.tf_Farmaceutico_id_2, self.tf_Farmaceutico_nombre_2,
                  self.tf_Farmaceutico_direccion_2, self.tf_Farmaceutico_edad_2):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se elimino el farmaceutico exitosamente!.")
    aviso.exec_()
def AccionEliminarPropietario(self):
    """Delete the propietario whose id is in the form, then clear the form."""
    db.delete(db[self.tf_propietario_id.text()])
    for campo in (self.tf_propietario_id, self.tf_propietario_nombre,
                  self.tf_propietario_direccion, self.tf_propietario_edad):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se elimino el propietario exitosamente!.")
    aviso.exec_()
def AccionEliminarProducto(self):
    """Delete the product whose id is in the form, then clear the form."""
    db.delete(db[self.tf_Producto_id.text()])
    for campo in (self.tf_Producto_id, self.tf_Producto_nombre,
                  self.tf_Producto_Fabricante, self.tf_Producto_precioCoste,
                  self.tf_Producto_Categoria):
        campo.setText("")
    self.tf_Producto_DescripcionProducto.setPlainText("")
    aviso = QMessageBox()
    aviso.setText("Se elimino el producto exitosamente!.")
    aviso.exec_()
def AccionEliminarLaboratorio(self):
    """Delete the laboratory whose id is in the form, then clear the form."""
    db.delete(db[self.tf_Laboratorio_id.text()])
    self.tf_Laboratorio_id.setText("")
    self.tf_Laboratorio_nombre.setText("")
    aviso = QMessageBox()
    aviso.setText("Se elimino el laboratorio exitosamente!.")
    aviso.exec_()
def AccionEliminarFarmacia(self):
    """Delete the pharmacy whose code is in the form, then reset the form.

    Fix: the original cleared the fields with ``settext`` -- QLineEdit's
    method is ``setText``, so the handler raised AttributeError right after
    the delete.
    """
    db.delete(db[self.tf_Farmacia_codigoFarm.text()])
    self.bt_load.setEnabled(True)
    for campo in (self.tf_Farmacia_codigoFarm, self.tf_Farmacia_Ciudad,
                  self.tf_Farmacia_departamento, self.tf_Farmacia_calle):
        campo.setText('')
    aviso = QMessageBox()
    aviso.setText("Se elimino la farmacia exitosamente!.")
    aviso.exec_()
def agregarFarmacia(self):
    """Create a pharmacy document from the form plus the pending
    almacenes/propietarios/farmaceuticos assignment lists."""
    doc = {
        '_id': self.tf_Farmacia_codigoFarm.text(),
        "direccion": {
            "ciudad": self.tf_Farmacia_Ciudad.text(),
            "departamento": self.tf_Farmacia_departamento.text(),
            "calle": self.tf_Farmacia_calle.text()
        },
        "almacenes": almacenes,
        "propietarios": propietarios,
        "farmaceuticos": farmaceuticos,
        "tipo": "farmacia"
    }
    db.save(doc)
    self.bt_load.setEnabled(True)
    for campo in (self.tf_Farmacia_codigoFarm, self.tf_Farmacia_Ciudad,
                  self.tf_Farmacia_departamento, self.tf_Farmacia_calle):
        campo.setText('')
    aviso = QMessageBox()
    aviso.setText("Se agrego la farmacia exitosamente!.")
    aviso.exec_()
def loadComboboxes(self):
    """Populate the propietario, farmaceutico and almacen combo boxes.

    Improvement: the 'personas' view feeds both the propietario and the
    farmaceutico combos, so it is fetched once instead of twice.
    """
    personas = requests.get(
        'http://127.0.0.1:5984/test2/_design/list/_view/personas'
    ).json().get('rows')
    self.cb_Farmacia_Propietarios.clear()
    self.cb_Farmacia_farmaceuticos.clear()
    for fila in personas:
        self.cb_Farmacia_Propietarios.addItem(fila.get('key'))
        self.cb_Farmacia_farmaceuticos.addItem(fila.get('key'))
    filas_almacenes = requests.get(
        'http://127.0.0.1:5984/test2/_design/list/_view/almacenes'
    ).json().get('rows')
    self.cb_almaneces.clear()
    for fila in filas_almacenes:
        self.cb_almaneces.addItem(fila.get('key'))
def loadPropietarios(self):
    """Refill the propietario combo box from the pending assignment list."""
    self.cb_Farmacia_Propietarios.clear()
    for propietario in propietarios:
        self.cb_Farmacia_Propietarios.addItem(propietario)
def agregarProductos(self):
    """Create a new product document from the product form fields."""
    doc = {
        '_id': self.tf_Producto_id.text(),
        'nombre': self.tf_Producto_nombre.text(),
        'fabricante': self.tf_Producto_Fabricante.text(),
        'precioCosto': self.tf_Producto_precioCoste.text(),
        'precioVenta': '',
        'cantidad': '',
        'seguro': self.cb_Producto_TieneProteccion.currentText(),
        'categoria': self.tf_Producto_Categoria.text(),
        'descripcion': self.tf_Producto_DescripcionProducto.toPlainText(),
        'tipo': "producto"
    }
    db.save(doc)
    for campo in (self.tf_Producto_id, self.tf_Producto_nombre,
                  self.tf_Producto_Fabricante, self.tf_Producto_precioCoste,
                  self.tf_Producto_Categoria):
        campo.setText("")
    self.tf_Producto_DescripcionProducto.setPlainText("")
    aviso = QMessageBox()
    aviso.setText("Se agrego el producto exitosamente!.")
    aviso.exec_()
def agregarLaboratorio(self):
    """Create a laboratory document with an empty product list."""
    doc = {
        '_id': self.tf_Laboratorio_id.text(),
        'nombre': self.tf_Laboratorio_nombre.text(),
        'productos': [],
        'tipo': 'laboratorio'
    }
    db.save(doc)
    self.tf_Laboratorio_id.setText("")
    self.tf_Laboratorio_nombre.setText("")
    aviso = QMessageBox()
    aviso.setText("Se agrego el laboratorio exitosamente!.")
    aviso.exec_()
def agregarPropietario(self):
    """Create a propietario (person) document from the form fields.

    Cleanup: removed the unused local ``persona = ("Propietario")`` -- the
    saved document uses the literal ``'persona'`` type tag.
    """
    doc = {
        '_id': self.tf_propietario_id.text(),
        'nombre': self.tf_propietario_nombre.text(),
        'direccion': self.tf_propietario_direccion.text(),
        'edad': self.tf_propietario_edad.text(),
        'tipo': 'persona'
    }
    db.save(doc)
    for campo in (self.tf_propietario_id, self.tf_propietario_nombre,
                  self.tf_propietario_direccion, self.tf_propietario_edad):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se agrego el propietario exitosamente!.")
    aviso.exec_()
def agregarFarmaceutico(self):
    """Create a farmaceutico (person) document from the form fields.

    Cleanup: removed the unused local ``persona = ("Farmaceutico")`` -- the
    saved document uses the literal ``'persona'`` type tag.
    """
    doc = {
        '_id': self.tf_Farmaceutico_id_2.text(),
        'nombre': self.tf_Farmaceutico_nombre_2.text(),
        'direccion': self.tf_Farmaceutico_direccion_2.text(),
        'edad': self.tf_Farmaceutico_edad_2.text(),
        'tipo': 'persona'
    }
    db.save(doc)
    for campo in (self.tf_Farmaceutico_id_2, self.tf_Farmaceutico_nombre_2,
                  self.tf_Farmaceutico_direccion_2, self.tf_Farmaceutico_edad_2):
        campo.setText("")
    aviso = QMessageBox()
    aviso.setText("Se agrego el farmaceutico exitosamente!.")
    aviso.exec_()
if __name__ == "__main__":
    # Launch the Qt application and block until the main window closes.
    app = QtWidgets.QApplication(sys.argv)
    window = MyApp()
    window.show()
    sys.exit(app.exec_())
| 39.283333
| 119
| 0.638948
|
4a08a5a7f8b6d3d06d11db0136e73a196ac7335e
| 838
|
py
|
Python
|
sympy/deprecated/tests/test_deprecated_imports.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2019-05-18T22:36:49.000Z
|
2019-05-24T05:56:16.000Z
|
sympy/deprecated/tests/test_deprecated_imports.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2020-08-18T15:21:59.000Z
|
2020-08-18T19:35:29.000Z
|
sympy/deprecated/tests/test_deprecated_imports.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 3
|
2019-05-18T21:32:31.000Z
|
2019-07-26T11:05:46.000Z
|
import sympy
from sympy.testing.pytest import warns_deprecated_sympy
def test_deprecated_imports():
    """Check that pre-1.6 submodule names import but emit deprecation warnings."""
    # https://github.com/sympy/sympy/pull/18245
    # Before 1.6 these names were importable with e.g.
    #     from sympy import *
    #     from sympy import add
    # Now sympy/__init__.py uses __all__ so these names are no longer
    # accidentally imported. However many of the names now give a warning and
    # this test checks that they are importable but a warning is given.
    from sympy import add
    # Accessing an attribute of a deprecated shim module must warn.
    with warns_deprecated_sympy():
        add.Add
    # The shim module class records every deprecated module path.
    modnames = type(add)._DEPRECATED_IMPORTS
    assert len(modnames) == 80
    for modname in modnames:
        name = modname.split('.')[-1]
        mod = getattr(sympy, name)
        # `.mod` is the real module behind the shim; pick any attribute name
        # to trigger the deprecated-access path.
        attr = dir(mod.mod)[0]
        with warns_deprecated_sympy():
            getattr(mod, attr)
| 31.037037
| 78
| 0.677804
|
4a08a5f51ff50f26814c35c2412cdf799ac5cffd
| 5,455
|
py
|
Python
|
examples/fit_poc.py
|
cclauss/SparseSC
|
bd5c65f162a5431f92ed957df3385c803f2d3365
|
[
"MIT"
] | null | null | null |
examples/fit_poc.py
|
cclauss/SparseSC
|
bd5c65f162a5431f92ed957df3385c803f2d3365
|
[
"MIT"
] | null | null | null |
examples/fit_poc.py
|
cclauss/SparseSC
|
bd5c65f162a5431f92ed957df3385c803f2d3365
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------
# Programmer: Jason Thorpe
# Date 1/11/2019 1:25:57 PM
# Purpose: Implement round-robin fitting of Sparse Synthetic Controls Model for DGP based analysis
# Description:
#
# This is intended for use in proofs of concept where the underlying data
# model is known and the experiment is aimed at understanding the extent to
# which the SparseSC model correctly and efficiently estimates the underlying
# data model.
#
# This code therefore repeatedly splits the data into fitting and hold-out
# sets in a round-robin fashion, fits the covariate coefficients in the
# fitting set, applies the covariate weights estimated in the fitting set to
# create weights for individual units in the held-out set, and returns the
# fitted weights and synthetic controls for every unit.
#
# Usage:
#
#
# import sys
# import os
# import numpy as np
# repo_path = 'c:\path\to\the\SparseSC\git\repo'
# sys.path.append(repo_path)
# sys.path.append(os.path.join(repo_path,'examples'))
# x = np.random.rand(100,20)
# y = np.random.rand(100,8)
# from fit_poc import fit_poc
# weights, syntetic_y = fit_poc(x,y)
#
#
# --------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import SparseSC as SC
from sklearn.model_selection import KFold
def fit_poc(X, Y,
            Lambda_min = 1e-6,
            Lambda_max = 1,
            grid_points = 20,
            grid = None,
            # fold tuning parameters: either an integer or a list of
            # test/train subsets such as the result of calling KFold().split()
            outer_folds = 10,
            cv_folds = 10,
            gradient_folds = 10,
            random_state = 10101,
            ):
    """Round-robin (outer cross-validation) fit of Sparse Synthetic Controls.

    Fixes versus the original: the defaults used Python 2 long literals
    (``20L``, ``10L``), which are a SyntaxError on Python 3 even though the
    rest of the file uses Python 3 ``print()``; and the progress message
    reported ``len(outer_folds) + 1`` total folds (off by one).

    Parameters
    ----------
    X, Y : 2-D arrays with the same number of rows (units).
    Lambda_min, Lambda_max, grid_points : define the default log-spaced
        relative-penalty grid used when ``grid`` is None.
    grid : optional explicit grid of relative penalties.
    outer_folds, cv_folds, gradient_folds : an integer fold count or an
        iterable of (train, test) index pairs (e.g. ``KFold().split()``).
    random_state : seed for the outer ``KFold`` shuffle.

    Returns
    -------
    (out_weights, out_predictions)
        Per-unit synthetic-control weights (``n_units x n_units``) and the
        synthetic outcomes for every unit, assembled across the outer folds.
    """
    if grid is None:
        grid = np.exp(np.linspace(np.log(Lambda_min), np.log(Lambda_max), grid_points))
    assert X.shape[0] == Y.shape[0]

    out_weights = np.zeros((Y.shape[0], X.shape[0]))
    out_predictions = np.zeros(Y.shape)

    # Accept either a fold count or an explicit iterable of index splits.
    try:
        iter(outer_folds)
    except TypeError:
        outer_folds = KFold(outer_folds, shuffle=True, random_state=random_state).split(np.arange(Y.shape[0]))
    outer_folds = list(outer_folds)

    for i, (train, test) in enumerate(outer_folds):
        # --------------------------------------------------
        # Phase 0: Data wrangling
        # --------------------------------------------------
        Xtrain = X[train, :]
        Xtest = X[test, :]
        Ytrain = Y[train, :]
        Ytest = Y[test, :]

        # L2 penalty guesstimate: very quick (milliseconds)
        L2_PEN_W = SC.L2_pen_guestimate(Xtrain)

        # Maximum lambda: quick (seconds to tens of seconds)
        LAMBDA_max = SC.get_max_lambda(
            Xtrain,
            Ytrain,
            L2_PEN_W = L2_PEN_W,
            grad_splits=gradient_folds,
            aggressiveness = 0.2,  # initial learning rate
            verbose=1)

        # --------------------------------------------------
        # Phase 1: cross-fold residual errors for each lambda
        # --------------------------------------------------
        # Scores for each value of the grid: very slow (minutes to hours)
        scores = SC.CV_score(X = Xtrain,
                             Y = Ytrain,
                             splits = cv_folds,
                             LAMBDA = grid * LAMBDA_max,
                             progress = True,
                             L2_PEN_W = L2_PEN_W,
                             grad_splits = gradient_folds)

        # Index of the best score
        best_i = np.argmin(scores)
        best_lambda = (grid * LAMBDA_max)[best_i]

        # --------------------------------------------------
        # Phase 2: extract V and weights: slow (tens of seconds to minutes)
        # --------------------------------------------------
        best_V = SC.tensor(X = Xtrain,
                           Y = Ytrain,
                           LAMBDA = best_lambda,
                           grad_splits = gradient_folds,
                           aggressiveness = 0.2)

        # Weights for the held-out units, using V fit on the training units.
        out_of_sample_weights = SC.weights(Xtrain,
                                           Xtest,
                                           V = best_V,
                                           L2_PEN_W = L2_PEN_W)
        Y_SC_test = out_of_sample_weights.dot(Ytrain)

        # Build the synthetic controls
        out_weights[test, :] = out_of_sample_weights
        out_predictions[test, :] = Y_SC_test

        # Calculate errors and R-squareds.
        # NOTE(review): both null models are centered on Xtest rather than
        # Ytest -- confirm that is intentional.
        ct_prediction_error = Y_SC_test - Ytest
        null_model_error = Ytest - np.mean(Xtest)
        betternull_model_error = (Ytest.T - np.mean(Xtest, 1)).T

        print("#--------------------------------------------------")
        print("OUTER FOLD %s OF %s: Group Mean R-squared: %0.3f%%; Individual Mean R-squared: %0.3f%%" % (
            i + 1,
            len(outer_folds),  # was len(outer_folds) + 1: overstated the total
            100 * (1 - np.power(ct_prediction_error, 2).sum() / np.power(null_model_error, 2).sum()),
            100 * (1 - np.power(ct_prediction_error, 2).sum() / np.power(betternull_model_error, 2).sum())))
        print("#--------------------------------------------------")

    return out_weights, out_predictions
| 38.415493
| 130
| 0.510174
|
4a08a643370f72386fbd18f2e3224e3f575a6d46
| 13,044
|
py
|
Python
|
python/graphscope/learning/graph.py
|
wuyueandrew/GraphScope
|
9e2d77d83378f85f001b555d06e4dcbf9a6a4260
|
[
"Apache-2.0"
] | 1
|
2021-12-30T02:55:16.000Z
|
2021-12-30T02:55:16.000Z
|
python/graphscope/learning/graph.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
python/graphscope/learning/graph.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import collections
import json
from copy import deepcopy
try:
from graphlearn import Graph as GLGraph
except ImportError:
GLGraph = object
from graphscope.framework.dag import DAGNode
from graphscope.framework.dag_utils import close_learning_instance
from graphscope.framework.dag_utils import create_learning_instance
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import check_argument
class GraphDAGNode(DAGNode):
    """A class represents a learning instance in a DAG.
    The following example demonstrates its usage:
    .. code:: python
        >>> # lazy mode
        >>> import graphscope as gs
        >>> sess = gs.session(mode="lazy")
        >>> g = sess.g() # <graphscope.framework.graph.GraphDAGNode object>
        >>> lg = sess.learning(g)
        >>> print(lg) # <graphscope.learning.graph.GraphDAGNode object>
        >>> lg_graph = sess.run(lg)
        >>> print(lg) # <graphscope.learning.graph.Graph object>
    """
    def __init__(self, session, graph, nodes=None, edges=None, gen_labels=None):
        """
        See params detail in :meth:`graphscope.Session.graphlearn`
        """
        self._session = session
        self._graph = graph
        # Build the create-learning-instance op from the graph plus the
        # node/edge feature selections and split labels.
        self._op = create_learning_instance(self._graph, nodes, edges, gen_labels)
        # add op to dag
        self._session.dag.add_op(self._op)
    def close(self):
        """Close learning instance and release the resources.
        Returns:
            :class:`graphscope.learning.graph.ClosedLearningInstance`
        """
        op = close_learning_instance(self)
        return ClosedLearningInstance(self._session, op)
class Graph(GLGraph):
def __init__(self, graph_node, handle, config=None, object_id=None):
"""Initialize a graph for the learning engine using a handle."""
self.graph_node = graph_node
self.graphscope_session = self.graph_node.session
# copy and set op evaluated
self.graph_node.op = deepcopy(self.graph_node.op)
self.graph_node.evaluated = True
self.graphscope_session.dag.add_op(self.graph_node.op)
handle = self.decode_arg(handle)
config = self.decode_arg(config)
if config is None:
if "config" in handle:
config = handle["config"]
if config is None:
config = collections.defaultdict(lambda: dict)
if object_id is None:
object_id = handle["vineyard_id"]
self.handle = handle
self.config = config
self.object_id = object_id
self.closed = False
super(Graph, self).__init__()
self.vineyard(handle, config["nodes"], config["edges"])
for label, node_attr in config["node_attributes"].items():
n_ints, n_floats, n_strings = (
node_attr[1][0],
node_attr[1][1],
node_attr[1][2],
)
self.node_attributes(label, node_attr[0], n_ints, n_floats, n_strings)
for label, edge_attr in config["edge_attributes"].items():
n_ints, n_floats, n_strings = (
edge_attr[1][0],
edge_attr[1][1],
edge_attr[1][2],
)
self.edge_attributes(label, edge_attr[0], n_ints, n_floats, n_strings)
for node_view_label, node_label, nsplit, split_range in config["gen_labels"]:
self.node_view(
node_view_label, node_label, nsplit=nsplit, split_range=split_range
)
# server_own=False: make sure the glog inside graph-learn get initialized
self.init_vineyard(worker_index=0, worker_count=1, server_own=False)
def decode_arg(self, arg):
if arg is None or isinstance(arg, dict):
return arg
return json.loads(base64.b64decode(arg.encode("utf-8")).decode("utf-8"))
def close(self):
if not self.closed and not self.graphscope_session.closed:
self.closed = True
super(Graph, self).close() # close client first
# close server instance
if self.graphscope_session is not None:
self.graphscope_session._wrapper(self.graph_node.close())
self.graphscope_session._close_learning_instance(self)
@staticmethod # noqa: C901
def preprocess_args(handle, nodes, edges, gen_labels): # noqa: C901
handle = json.loads(base64.b64decode(handle).decode("utf-8", errors="ignore"))
node_names = []
node_attributes = {}
edge_names = []
edge_attributes = {}
def selected_property_schema(attr_types, attributes):
prop_counts = collections.defaultdict(lambda: 0)
for attr in attributes:
prop_counts[attr_types[attr]] += 1
return [prop_counts["i"], prop_counts["f"], prop_counts["s"]]
if nodes is not None:
for node in nodes:
if isinstance(node, str):
if node in node_names:
raise InvalidArgumentError("Duplicate node type: %s" % node)
node_names.append(node)
elif isinstance(node, tuple):
if node[0] in node_names:
raise InvalidArgumentError("Duplicate node type: %s" % node[0])
node_names.append(node[0])
attr_types = handle["node_attribute_types"][node[0]]
attr_schema = selected_property_schema(attr_types, node[1])
node_attributes[node[0]] = (node[1], attr_schema)
else:
raise InvalidArgumentError(
"The node parameter is in bad format: %s" % node
)
else:
for node in handle["node_schema"]:
node_names.append(node.split(":")[0])
if edges is not None:
for edge in edges:
if isinstance(edge, str):
if len(node_names) > 1:
raise InvalidArgumentError(
"Cannot inference edge type when multiple kinds of nodes exists"
)
edge_names.append((node_names[0], edge, node_names[0]))
elif (
isinstance(edge, tuple)
and isinstance(edge[0], str)
and isinstance(edge[1], str)
):
edge_names.append(edge)
elif (
isinstance(edge, tuple)
and isinstance(edge[0], str)
and isinstance(edge[1], list)
):
if len(node_names) > 1:
raise InvalidArgumentError(
"Cannot inference edge type when multiple kinds of nodes exists"
)
edge_names.append((node_names[0], edge[0], node_names[0]))
attr_types = handle["edge_attribute_types"][edge[0]]
attr_schema = selected_property_schema(attr_types, edge[1])
edge_attributes[edge[0]] = (edge[1], attr_schema)
elif (
isinstance(edge, tuple)
and isinstance(edge[0], (list, tuple))
and isinstance(edge[1], list)
):
edge_names.append(edge[0])
attr_types = handle["edge_attribute_types"][edge[0][1]]
attr_schema = selected_property_schema(attr_types, edge[1])
edge_attributes[edge[0][1]] = (edge[1], attr_schema)
else:
raise InvalidArgumentError(
"The edge parameter is in bad format: %s" % edge
)
split_groups = collections.defaultdict(list)
if gen_labels is not None:
for label in gen_labels:
if len(label) == 3 or len(label) == 4:
split_groups[label[1]].append(label)
else:
raise InvalidArgumentError(
"Bad gen_labels arguments: %s" % gen_labels
)
split_labels = []
for label, group in split_groups.items():
lengths = [len(split) for split in group]
check_argument(
lengths[:-1] == lengths[1:], "Invalid gen labels: %s" % group
)
if len(group[0]) == 3:
length_sum = sum(split[2] for split in group)
s, ss = 0, []
for split in group:
ss.append((s, s + split[2]))
s += split[2]
group = [
(split[0], split[1], length_sum, s) for split, s in zip(group, ss)
]
for split in group:
split_labels.append(split)
return {
"nodes": node_names if node_names else None,
"edges": edge_names if edge_names else None,
"node_attributes": node_attributes,
"edge_attributes": edge_attributes,
"gen_labels": split_labels,
}
def get_handle(self, worker_count=1):
"""Return a base64-encoded handle for distributed training."""
handle_copy = self.handle.copy()
handle_copy["config"] = self.config
handle_copy["client_count"] = worker_count
return base64.b64encode(json.dumps(handle_copy).encode("utf-8")).decode("utf-8")
def V(self, t, feed=None):
"""Entry of Gremlin-like query. Start from node.
Args:
t (string): The type of node which is the entry of query or the type
of edge when node is from edge source or dst.
feed (None| numpy.ndarray | types.GeneratorType | `Nodes`): When `feed`
is not `None`, the `type` should be a node type, which means query the
attributes of the specified node ids.
None: Default. Sample nodes with the following .shuffle and .batch API.
numpy.ndarray: Any shape of ids. Get nodes of the given ids and
node_type.
types.Generator: A generator of numpy.ndarray. Get nodes of generated
ids and given node_type.
`Nodes`: A `Nodes` object.
Return:
A 'Query' object.
Example:
.. code:: python
>>> import numpy as np
>>> g.V("user").shuffle().batch(64)
>>> g.V("user", feed=np.array([1, 2, 3]))
>>> def gen():
>>> while True:
>>> yield np.array([1, 2, 3])
>>> gen = gen()
>>> g.V("user", feed=gen)
"""
return super(Graph, self).V(t, feed)
def E(self, edge_type, feed=None, reverse=False):
"""Entry of Gremlin-like query. Start from edge.
Args:
edge_type (string): The type of edge which is the entry of query.
feed (None| (np.ndarray, np.ndarray) | types.GeneratorType | `Edges`):
None: Default. Sample edges with the following .shuffle and .batch API.
(np.ndarray, np.ndarray): src_ids, dst_ids. Get edges of the given
(src_ids, dst_ids) and given edge_type. src_ids and dst_ids must be
the same shape, dtype is int.
types.Generator: A generator of (numpy.ndarray, numpy.ndarray). Get
edges of generated (src_ids, dst_ids) and given edge_type.
`Edges`: An `Edges` object.
Return:
A 'Query' object.
Example:
.. code:: python
>>> import numpy as np
>>> g.E("buy").shuffle().batch(64)
>>> g.E("buy", feed=(np.array([1, 2, 3]), np.array([4, 5, 6]))
>>> def gen():
>>> while True:
>>> yield (np.array([1, 2, 3]), np.array([4, 5, 6]))
>>> gen = gen()
>>> g.E("buy", feed=gen)
"""
return super(Graph, self).E(edge_type, feed, reverse)
class ClosedLearningInstance(DAGNode):
"""Closed learning instance node in a DAG."""
def __init__(self, session, op):
self._session = session
self._op = op
# add op to dag
self._session.dag.add_op(self._op)
| 39.171171
| 92
| 0.561101
|
4a08a64383867b8da4d1206c66d4730c0743f04c
| 1,538
|
py
|
Python
|
tests/test_esys_get_random.py
|
pdxjohnny/tpm2-pytss
|
484e3592208f196bec6381f36fdcdfd99bd019a3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_esys_get_random.py
|
pdxjohnny/tpm2-pytss
|
484e3592208f196bec6381f36fdcdfd99bd019a3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_esys_get_random.py
|
pdxjohnny/tpm2-pytss
|
484e3592208f196bec6381f36fdcdfd99bd019a3
|
[
"BSD-2-Clause"
] | null | null | null |
import random
from contextlib import ExitStack
from tpm2_pytss.binding import *
from tpm2_pytss.esys import InvalidArgumentError
from .base_esys import BaseTestESYS
class TestGetRandom(BaseTestESYS):
def test_random_length(self):
length = random.randint(8, 32)
array = self.esys_ctx.get_random(length)
self.assertEqual(length, len(array))
def test_invalid_length(self):
with self.assertRaises(InvalidArgumentError):
self.esys_ctx.get_random(65)
def test_start_auth_session(self):
with ExitStack() as stack:
symmetric = TPMT_SYM_DEF(
algorithm=TPM2_ALG_AES,
keyBits=TPMU_SYM_KEY_BITS(aes=128),
mode=TPMU_SYM_MODE(aes=TPM2_ALG_CFB),
)
symmetric_ptr = stack.enter_context(symmetric.ptr())
session = stack.enter_context(
self.esys_ctx.auth_session(
ESYS_TR_NONE,
ESYS_TR_NONE,
ESYS_TR_NONE,
ESYS_TR_NONE,
ESYS_TR_NONE,
None,
TPM2_SE_HMAC,
symmetric_ptr,
TPM2_ALG_SHA1,
)
)
self.esys_ctx.TRSess_SetAttributes(
session, TPMA_SESSION_AUDIT, TPMA_SESSION_AUDIT
)
length = 48
array = self.esys_ctx.get_random(length, shandle1=session)
self.assertEqual(length, len(array))
| 27.464286
| 70
| 0.572172
|
4a08a65cbe79c6fc32298b441f863bfa7155c88a
| 1,767
|
py
|
Python
|
python/raspberrypi/examples/scan_modbus_id.py
|
cdjq/DFRobot_RTU
|
e4d028756bf152feb5b31ae6cc602de3fe69fa6a
|
[
"MIT"
] | null | null | null |
python/raspberrypi/examples/scan_modbus_id.py
|
cdjq/DFRobot_RTU
|
e4d028756bf152feb5b31ae6cc602de3fe69fa6a
|
[
"MIT"
] | null | null | null |
python/raspberrypi/examples/scan_modbus_id.py
|
cdjq/DFRobot_RTU
|
e4d028756bf152feb5b31ae6cc602de3fe69fa6a
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
'''
# scan_modbus_id.py
#
# @brief 扫描modbus总线上,所有串口配置为9600波特率,8位数据位,无校验位,1位停止位的modbus从机的地址。
# @n modbus从机设备地址范围为1~247(0x01~0xF7),0为广播地址,所有modbus从机接受到广播包都会处理,但不会响应。
# @n 一个modbus主机可以连多个modbus从机,在运行此demo之前,必须知道modbus从机的波特率,数据位,校验位,停止位等串口配置。
#
# @n connected
# -----------------------------------------------------------------------------
# sensor pin | MCU | raspberry pi |
# VCC | 3.3V/5V | 5V/3V3 |
# GND | GND | GND |
# RX | TX | (BCM)14 TX |
# TX | RX | (BCM)15 RX |
# -----------------------------------------------------------------------------
#
# @copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
# @licence The MIT License (MIT)
# @author [Arya](xue.peng@dfrobot.com)
# @version V1.0
# @date 2021-07-16
# @https://github.com/DFRobot/DFRobot_RTU
'''
import sys
import os
import time
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from DFRobot_RTU import *
modbus = DFRobot_RTU(9600, 8, 'N', 1)
if __name__ == "__main__":
DEVICE_ID_REG = 0x02
while True:
modbus_id = 1
n_devices = 0
print("Scanning...")
while modbus_id < 248:
ret = modbus.read_holding_register(modbus_id, DEVICE_ID_REG)
if ret == modbus_id:
print("modbus device found at address 0x%02X !"%modbus_id)
n_devices += 1
modbus_id += 1
if n_devices == 0:
print("No modbus devices found\n")
else:
print("done\n")
time.sleep(1)
| 33.339623
| 81
| 0.485569
|
4a08a67765d22631d223dc9f4e38324416fb71ec
| 12,338
|
py
|
Python
|
qa/rpc-tests/p2p-acceptblock.py
|
fargocoin/fargocoind
|
f951c3a1e2459ef564e8a5ff048bfc688e8a5646
|
[
"MIT"
] | 7
|
2018-01-21T13:10:40.000Z
|
2018-07-07T06:53:13.000Z
|
qa/rpc-tests/p2p-acceptblock.py
|
fargocoin/fargocoind
|
f951c3a1e2459ef564e8a5ff048bfc688e8a5646
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-acceptblock.py
|
fargocoin/fargocoind
|
f951c3a1e2459ef564e8a5ff048bfc688e8a5646
|
[
"MIT"
] | 2
|
2018-06-09T17:56:58.000Z
|
2018-11-04T15:33:34.000Z
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import FargoCoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(FargoCoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| 42.398625
| 107
| 0.651564
|
4a08a6b5ae229b2d52672d9c5864d19888d82704
| 2,155
|
py
|
Python
|
app.py
|
Brundha1996/BRUNDHA
|
02009f71b22d8e42e99d8d9ab82c579cdb6b53ef
|
[
"MIT"
] | null | null | null |
app.py
|
Brundha1996/BRUNDHA
|
02009f71b22d8e42e99d8d9ab82c579cdb6b53ef
|
[
"MIT"
] | 14
|
2020-06-05T23:59:10.000Z
|
2022-03-12T00:29:25.000Z
|
app.py
|
harit198/Web-App-Car-Price-Predictor
|
04a30e728bad391a4083f5ac17f02f36a2112b24
|
[
"MIT"
] | null | null | null |
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import pandas as pd
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
def Convert(d):
Year=d.get("Year")
Year=int(Year)
Owner_Type=d.get("Owner_Type")
Owner_Type=int(Owner_Type)
Mileage=d.get("Mileage")
Mileage=float(Mileage)
Engine=d.get("Engine")
Engine=int(Engine)
Power=d.get("Power")
Power=float(Power)
Seats=d.get("Seats")
Seats=int(Seats)
if d.get("Fuel_Type")== "Diesel":
Fuel_Type_Diesel=1
Fuel_Type_LPG=0
Fuel_Type_Petrol=0
elif d.get("Fuel_Type")== "Petrol":
Fuel_Type_Diesel=0
Fuel_Type_LPG=0
Fuel_Type_Petrol=1
else :
Fuel_Type_Diesel=0
Fuel_Type_LPG=1
Fuel_Type_Petrol=0
if d.get("Transmission")=="Manual":
Transmission_Manual=1
else:
Transmission_Manual=0
def create_dict(Year,Owner_Type,Mileage,Engine,Power,Seats,Fuel_Type_Diesel,Fuel_Type_LPG,Fuel_Type_Petrol,Transmission_Manual):
final_dict=[{"Year":Year,"Owner_Type":Owner_Type,"Mileage":Mileage,"Engine":Engine,"Power":Power,
"Seats":Seats,"Fuel_Type_Diesel":Fuel_Type_Diesel,"Fuel_Type_LPG":Fuel_Type_LPG,
"Fuel_Type_Petrol":Fuel_Type_Petrol,
"Transmission_Manual":Transmission_Manual
}]
final=pd.DataFrame(final_dict)
pred_t=model.predict(final)
return pred_t
pred=create_dict(Year,Owner_Type,Mileage,Engine,Power,Seats,Fuel_Type_Diesel,Fuel_Type_LPG,Fuel_Type_Petrol,Transmission_Manual)
return pred
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
d=request.form
pred_1=Convert(d)
return render_template('result.html', prediction_text="Your Car's Price Should Be {:.2f} lakhs".format(float(pred_1)))
if __name__ == "__main__":
app.run(debug=True)
| 24.488636
| 133
| 0.625058
|
4a08a84fad6c09b225982178155112d5e37460af
| 2,519
|
py
|
Python
|
ibis/backends/tests/test_param.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 986
|
2017-06-07T07:33:01.000Z
|
2022-03-31T13:00:46.000Z
|
ibis/backends/tests/test_param.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 2,623
|
2017-06-07T18:29:11.000Z
|
2022-03-31T20:27:31.000Z
|
ibis/backends/tests/test_param.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 238
|
2017-06-26T19:02:58.000Z
|
2022-03-31T15:18:29.000Z
|
import collections
import pytest
import ibis
import ibis.expr.datatypes as dt
@pytest.mark.parametrize(
('column', 'raw_value'),
[
('double_col', 0.0),
('double_col', 10.1),
('float_col', 1.1),
('float_col', 2.2),
],
)
@pytest.mark.xfail_unsupported
def test_floating_scalar_parameter(backend, alltypes, df, column, raw_value):
value = ibis.param(dt.double)
expr = alltypes[column] + value
expected = df[column] + raw_value
result = expr.execute(params={value: raw_value})
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
('start_string', 'end_string'),
[('2009-03-01', '2010-07-03'), ('2014-12-01', '2017-01-05')],
)
@pytest.mark.xfail_unsupported
def test_date_scalar_parameter(
backend, alltypes, df, start_string, end_string
):
start, end = ibis.param(dt.date), ibis.param(dt.date)
col = alltypes.timestamp_col.date()
expr = col.between(start, end)
expected_expr = col.between(start_string, end_string)
result = expr.execute(params={start: start_string, end: end_string})
expected = expected_expr.execute()
backend.assert_series_equal(result, expected)
@pytest.mark.xfail_backends(['pyspark'])
@pytest.mark.xfail_unsupported
def test_timestamp_accepts_date_literals(backend, alltypes):
date_string = '2009-03-01'
param = ibis.param(dt.timestamp)
expr = alltypes.mutate(param=param)
params = {param: date_string}
assert expr.compile(params=params) is not None
@pytest.mark.xfail_backends(['pyspark'])
@pytest.mark.xfail_unsupported
def test_scalar_param_array(backend, con):
value = [1, 2, 3]
param = ibis.param(dt.Array(dt.int64))
result = con.execute(param.length(), params={param: value})
assert result == len(value)
@pytest.mark.xfail_unsupported
def test_scalar_param_struct(backend, con):
value = collections.OrderedDict([('a', 1), ('b', 'abc'), ('c', 3.0)])
param = ibis.param(
dt.Struct.from_tuples(
[('a', 'int64'), ('b', 'string'), ('c', 'float64')]
)
)
result = con.execute(param['a'], params={param: value})
assert result == value['a']
@pytest.mark.xfail_unsupported
def test_scalar_param_map(backend, con):
value = {'a': 'ghi', 'b': 'def', 'c': 'abc'}
param = ibis.param(dt.Map(dt.string, dt.string))
result = con.execute(param['b'], params={param: value})
assert result == value['b']
| 29.635294
| 77
| 0.670107
|
4a08a8c46e0a556b68637623fe0b52b3454bf6e7
| 10,041
|
py
|
Python
|
library/ptpulse/microphone.py
|
opensourcekids/pi-topPULSE
|
d471f777a15e54a0788e988a325c3e4574638ab7
|
[
"Apache-2.0"
] | null | null | null |
library/ptpulse/microphone.py
|
opensourcekids/pi-topPULSE
|
d471f777a15e54a0788e988a325c3e4574638ab7
|
[
"Apache-2.0"
] | null | null | null |
library/ptpulse/microphone.py
|
opensourcekids/pi-topPULSE
|
d471f777a15e54a0788e988a325c3e4574638ab7
|
[
"Apache-2.0"
] | 1
|
2021-01-22T04:17:43.000Z
|
2021-01-22T04:17:43.000Z
|
# microphone.py (pi-topPULSE)
# Copyright (C) 2017 CEED ltd.
#
import codecs
import binascii
import math
from tempfile import mkstemp
import os
import serial
import signal
import struct
import sys
from threading import Thread
import time
# local
from ptpulse import configuration
_debug = False
_bitrate = 8
_continue_writing = False
_recording_thread = False
_thread_running = False
_exiting = False
_temp_file_path = ""
#######################
# INTERNAL OPERATIONS #
#######################
def _debug_print(message):
"""INTERNAL. Print messages if debug mode enabled."""
if _debug == True:
print(message)
def _signal_handler(signal, frame):
"""INTERNAL. Handles signals from the OS."""
global _exiting
if _exiting == False:
_exiting = True
if _thread_running == True:
stop()
print("\nQuitting...")
sys.exit(0)
def _get_size(filename):
"""INTERNAL. Gets the size of a file."""
file_stats = os.stat(filename)
return file_stats.st_size
def _from_hex(value):
"""INTERNAL. Gets a bytearray from hex data."""
return bytearray.fromhex(value)
def _space_separated_little_endian(integer_value, byte_len):
"""INTERNAL. Get an integer in format for WAV file header."""
if byte_len <= 1:
pack_type = '<B'
elif byte_len <= 2:
pack_type = '<H'
elif byte_len <= 4:
pack_type = '<I'
elif byte_len <= 8:
pack_type = '<Q'
else:
print("Value cannot be represented in 8 bytes - exiting")
sys.exit()
hex_string = struct.pack(pack_type, integer_value)
temp = binascii.hexlify(hex_string).decode()
return ' '.join([temp[i:i+2] for i in range(0, len(temp), 2)])
def _init_header_information():
"""INTERNAL. Create a WAV file header."""
RIFF = "52 49 46 46"
WAVE = "57 41 56 45"
fmt = "66 6d 74 20"
DATA = "64 61 74 61"
if configuration.microphone_sample_rate_is_22khz():
capture_sample_rate = 22050
else:
capture_sample_rate = 16000
header = _from_hex(RIFF) # ChunkID
header += _from_hex(_space_separated_little_endian(0, 4)) # ChunkSize - 4 bytes (to be changed depending on length of data...)
header += _from_hex(WAVE) # Format
header += _from_hex(fmt) # Subchunk1ID
header += _from_hex(_space_separated_little_endian(16, 4)) # Subchunk1Size (PCM = 16)
header += _from_hex(_space_separated_little_endian(1, 2)) # AudioFormat (PCM = 1)
header += _from_hex(_space_separated_little_endian(1, 2)) # NumChannels
header += _from_hex(_space_separated_little_endian(capture_sample_rate, 4)) # SampleRate
header += _from_hex(_space_separated_little_endian(capture_sample_rate, 4)) # ByteRate (Same as SampleRate due to 1 channel, 1 byte per sample)
header += _from_hex(_space_separated_little_endian(1, 2)) # BlockAlign - (no. of bytes per sample)
header += _from_hex(_space_separated_little_endian(_bitrate, 2)) # BitsPerSample
header += _from_hex(DATA) # Subchunk2ID
header += _from_hex(_space_separated_little_endian(0, 4)) # Subchunk2Size - 4 bytes (to be changed depending on length of data...)
return header
def _update_header_in_file(file, position, value):
"""INTERNAL. Update the WAV header """
hex_value = _space_separated_little_endian(value, 4)
data = binascii.unhexlify(''.join(hex_value.split()))
file.seek(position)
file.write(data)
def _finalise_wav_file(file_path):
"""INTERNAL. Update the WAV file header with the size of the data."""
size_of_data = _get_size(file_path) - 44
if size_of_data <= 0:
print("Error: No data was recorded!")
os.remove(file_path)
else:
with open(file_path, 'rb+') as file:
_debug_print("Updating header information...")
_update_header_in_file(file, 4, size_of_data + 36)
_update_header_in_file(file, 40, size_of_data)
def _thread_method():
"""INTERNAL. Thread method."""
_record_audio()
def _record_audio():
"""INTERNAL. Open the serial port and capture audio data into a temp file."""
global _temp_file_path
temp_file_tuple = mkstemp()
os.close(temp_file_tuple[0])
_temp_file_path = temp_file_tuple[1]
if os.path.exists('/dev/serial0'):
_debug_print("Opening serial device...")
serial_device = serial.Serial(port = '/dev/serial0', timeout = 1, baudrate = 250000, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE, bytesize = serial.EIGHTBITS)
serial_device_open = serial_device.isOpen()
if serial_device_open == True:
try:
_debug_print("Start recording")
with open(_temp_file_path, 'wb') as file:
_debug_print("WRITING: initial header information")
file.write(_init_header_information())
if serial_device.inWaiting():
_debug_print("Flushing input and starting from scratch")
serial_device.flushInput()
_debug_print("WRITING: wave data")
while _continue_writing:
while not serial_device.inWaiting():
time.sleep(0.01)
audio_output = serial_device.read(serial_device.inWaiting())
data_to_write = ""
bytes_to_write = bytearray()
for pcm_data_block in audio_output:
if _bitrate == 16:
pcm_data_int = 0
if sys.version_info >= (3, 0):
pcm_data_int = pcm_data_block
scaled_val = int((pcm_data_int * 32768) / 255)
bytes_to_write += _from_hex(_space_separated_little_endian(scaled_val, 2))
else:
pcm_data_int = ord(pcm_data_block)
scaled_val = int((pcm_data_int * 32768) / 255)
data_to_write += _from_hex(_space_separated_little_endian(scaled_val, 2))
else:
if sys.version_info >= (3, 0):
pcm_data_int = pcm_data_block
bytes_to_write += _from_hex(_space_separated_little_endian(pcm_data_int, 1))
else:
pcm_data_int = ord(pcm_data_block)
data_to_write += _from_hex(_space_separated_little_endian(pcm_data_int, 1))
if sys.version_info >= (3, 0):
file.write(bytes_to_write)
else:
file.write(data_to_write)
time.sleep(0.1)
finally:
serial_device.close()
_finalise_wav_file(_temp_file_path)
_debug_print("Finished Recording.")
else:
print("Error: Serial port failed to open")
else:
print("Error: Could not find serial port - are you sure it's enabled?")
#######################
# EXTERNAL OPERATIONS #
#######################
def record():
"""Start recording on the pi-topPULSE microphone."""
global _thread_running
global _continue_writing
global _recording_thread
if not configuration.mcu_enabled():
print("Error: pi-topPULSE is not initialised.")
sys.exit()
if _thread_running == False:
_thread_running = True
_continue_writing = True
_recording_thread = Thread(group=None, target=_thread_method)
_recording_thread.start()
else:
print("Microphone is already recording!")
def is_recording():
"""Returns recording state of the pi-topPULSE microphone."""
return _thread_running
def stop():
"""Stops recording audio"""
global _thread_running
global _continue_writing
_continue_writing = False
_recording_thread.join()
_thread_running = False
def save(file_path, overwrite=False):
    """Save the recorded audio to ``file_path``.

    Args:
        file_path: Destination path for the WAV file.
        overwrite: When ``True``, replace an existing file at the
            destination; otherwise refuse and leave it untouched.

    The temporary recording is *moved* (not copied), so each recording can
    be saved only once.
    """
    global _temp_file_path
    if _thread_running:
        print("Microphone is still recording!")
        return
    if _temp_file_path == "" or not os.path.exists(_temp_file_path):
        print("No recorded audio data found")
        return
    if os.path.exists(file_path) and not overwrite:
        print("File already exists")
        return
    if os.path.exists(file_path):
        os.remove(file_path)
    os.rename(_temp_file_path, file_path)
    _temp_file_path = ""
def set_sample_rate_to_16khz():
    """Set the appropriate I2C bits to enable 16,000Hz recording on the microphone"""
    # Thin wrapper: the actual I2C register manipulation lives in the
    # `configuration` module so hardware access stays in one place.
    configuration.set_microphone_sample_rate_to_16khz()
def set_sample_rate_to_22khz():
    """Set the appropriate I2C bits to enable 22,050Hz recording on the microphone"""
    # Thin wrapper: the actual I2C register manipulation lives in the
    # `configuration` module so hardware access stays in one place.
    configuration.set_microphone_sample_rate_to_22khz()
def set_bit_rate_to_unsigned_8():
    """Set bitrate to device default"""
    # 8-bit unsigned PCM is what the hardware natively emits; the writer
    # thread then stores each sample as a single byte.
    global _bitrate
    _bitrate = 8
def set_bit_rate_to_signed_16():
    """Set bitrate to double that of device default by scaling the signal"""
    # The hardware still delivers 8-bit samples; the writer thread scales
    # each one into a 16-bit little-endian value before writing.
    global _bitrate
    _bitrate = 16
#######################
# INITIALISATION #
#######################
# Register a Ctrl+C handler so an in-progress recording is shut down
# cleanly.  signal.signal() returns the previously installed handler,
# which is kept (unused) in _signal.
_signal = signal.signal(signal.SIGINT, _signal_handler)
| 30.427273
| 182
| 0.578628
|
4a08a9f38d0708738d2a404fb275688efe7bbd7f
| 2,286
|
py
|
Python
|
ucsmsdk/mometa/gmeta/GmetaPolicyMapElement.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/gmeta/GmetaPolicyMapElement.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/gmeta/GmetaPolicyMapElement.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for GmetaPolicyMapElement ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class GmetaPolicyMapElementConsts():
    # Auto-generated constants container: GmetaPolicyMapElement declares no
    # enumerated property values, so this class is intentionally empty.
    pass
class GmetaPolicyMapElement(ManagedObject):
    """Auto-generated managed object for the UCS ``gmetaPolicyMapElement``
    class.  Do not hand-edit the metadata below; it mirrors the UCS model
    (property names, versions, access flags and validation regexes).
    """

    consts = GmetaPolicyMapElementConsts()
    # Properties that form the RN ("policymapelem-[name]").
    naming_props = set([u'name'])

    # MoMeta(class name, XML class id, RN pattern, min version, access,
    #        supported-flags mask, field names, access privileges,
    #        allowed parents, allowed children, supported platforms)
    mo_meta = MoMeta("GmetaPolicyMapElement", "gmetaPolicyMapElement", "policymapelem-[name]", VersionMeta.Version212a, "InputOutput", 0x3f, [], ["read-only"], [u'gmetaPolicyMapHolder'], [], [None])

    # Per-property metadata: (python name, XML name, type, min version,
    # access kind, mask bit, min/max length, validation regex, enums, ranges)
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.NAMING, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{2,64}""", [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # XML attribute name -> python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "name": "name",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, name, **kwargs):
        # `name` is the naming property; the remaining attributes default to
        # None and may be overridden through **kwargs by ManagedObject.
        self._dirty_mask = 0
        self.name = name
        self.child_action = None
        self.sacl = None
        self.status = None

        ManagedObject.__init__(self, "GmetaPolicyMapElement", parent_mo_or_dn, **kwargs)
| 47.625
| 248
| 0.653543
|
4a08aa816b36f1f96f58a0c92d8c9b3cd9bdc633
| 91
|
py
|
Python
|
twitter_feels/libs/twitter_analysis/__init__.py
|
michaelbrooks/twitter-feels
|
51dc00478f05841f3726edf5f7da7e0a46ae66e8
|
[
"MIT"
] | 1
|
2017-02-15T10:55:26.000Z
|
2017-02-15T10:55:26.000Z
|
twitter_feels/libs/twitter_analysis/__init__.py
|
michaelbrooks/twitter-feels
|
51dc00478f05841f3726edf5f7da7e0a46ae66e8
|
[
"MIT"
] | null | null | null |
twitter_feels/libs/twitter_analysis/__init__.py
|
michaelbrooks/twitter-feels
|
51dc00478f05841f3726edf5f7da7e0a46ae66e8
|
[
"MIT"
] | null | null | null |
# Explicit relative import (PEP 328) instead of the Python-2-only implicit
# `from models import ...`; this form works on both Python 2.5+ and Python 3.
from .models import TweetStream, TweetTimeFrame

# Public API of the twitter_analysis package.
__all__ = ['TweetStream', 'TweetTimeFrame']
| 30.333333
| 46
| 0.802198
|
4a08ab8b19ed3373792b110b40e5b9d25fe3ca65
| 60,037
|
py
|
Python
|
qupulse/hardware/feature_awg/tabor.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 30
|
2018-09-13T02:59:55.000Z
|
2022-03-21T04:25:22.000Z
|
qupulse/hardware/feature_awg/tabor.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 319
|
2015-03-10T09:37:20.000Z
|
2018-09-06T10:11:32.000Z
|
qupulse/hardware/feature_awg/tabor.py
|
qutech-lab/qc-toolkit
|
f00e0d0000bdc7a6604ceae2c15b60f4d10c4000
|
[
"MIT"
] | 14
|
2019-01-08T14:42:36.000Z
|
2021-05-21T08:53:06.000Z
|
import functools
import logging
import numbers
import sys
import weakref
import warnings
from typing import List, Tuple, Set, Callable, Optional, Any, cast, Union, Dict, Mapping, NamedTuple, Iterable,\
Collection, Sequence
from collections import OrderedDict
import numpy as np
from qupulse import ChannelID
from qupulse._program._loop import Loop, make_compatible
from qupulse.hardware.feature_awg.channel_tuple_wrapper import ChannelTupleAdapter
from qupulse.hardware.feature_awg.features import ChannelSynchronization, AmplitudeOffsetHandling, VoltageRange, \
ProgramManagement, ActivatableChannels, DeviceControl, StatusTable, SCPI, VolatileParameters, \
ReadProgram, RepetitionMode
from qupulse.hardware.util import voltage_to_uint16, find_positions
from qupulse.utils.types import TimeType
from qupulse.hardware.feature_awg.base import AWGChannelTuple, AWGChannel, AWGDevice, AWGMarkerChannel
from qupulse._program.tabor import TaborSegment, TaborException, TaborProgram, PlottableProgram, TaborSequencing, \
make_combined_wave
import tabor_control.device
import pyvisa
# The Tabor binary wire format built below assumes a little-endian host.
# Raise explicitly instead of `assert`, which is stripped under `python -O`.
if sys.byteorder != "little":
    raise RuntimeError("qupulse Tabor driver requires a little-endian platform")

__all__ = ["TaborDevice", "TaborChannelTuple", "TaborChannel"]

# Pairs a program's waveform->segment index mapping with its parsed TaborProgram.
TaborProgramMemory = NamedTuple("TaborProgramMemory", [("waveform_to_segment", np.ndarray),
                                                       ("program", TaborProgram)])
def with_configuration_guard(function_object: Callable[["TaborChannelTuple", Any], Any]) -> Callable[
        ["TaborChannelTuple"], Any]:
    """Decorator: keep the AWG in configuration mode while the wrapped method runs.

    A per-tuple counter makes the guard re-entrant: only the outermost
    guarded call enters and (in ``finally``) exits configuration mode, so
    nested guarded calls are free.
    """
    @functools.wraps(function_object)
    def guarded(channel_pair: "TaborChannelTuple", *args, **kwargs) -> Any:
        outermost = channel_pair._configuration_guard_count == 0
        if outermost:
            channel_pair._enter_config_mode()
        channel_pair._configuration_guard_count += 1
        try:
            return function_object(channel_pair, *args, **kwargs)
        finally:
            channel_pair._configuration_guard_count -= 1
            if channel_pair._configuration_guard_count == 0:
                channel_pair._exit_config_mode()

    return guarded
def with_select(function_object: Callable[["TaborChannelTuple", Any], Any]) -> Callable[["TaborChannelTuple"], Any]:
    """Decorator: make sure the channel tuple is selected on the instrument
    immediately before the wrapped function runs."""
    @functools.wraps(function_object)
    def selected_call(channel_tuple: "TaborChannelTuple", *args, **kwargs) -> Any:
        channel_tuple._select()
        return function_object(channel_tuple, *args, **kwargs)

    return selected_call
########################################################################################################################
# Device
########################################################################################################################
# Features
class TaborSCPI(SCPI):
    """SCPI command feature of a TaborDevice.

    Commands are fanned out to the main instrument and every mirror device;
    queries are answered by the main instrument unless mirror replies are
    explicitly requested.
    """
    def __init__(self, device: "TaborDevice", visa: pyvisa.resources.MessageBasedResource):
        super().__init__(visa)
        # Weak back-reference to the owning device (avoids a reference cycle).
        self._parent = weakref.ref(device)
    def send_cmd(self, cmd_str, paranoia_level=None):
        # Broadcast the command to the main instrument and all mirrors.
        for instr in self._parent().all_devices:
            instr.send_cmd(cmd_str=cmd_str, paranoia_level=paranoia_level)
    def send_query(self, query_str, query_mirrors=False) -> Any:
        # query_mirrors=True: every device answers (tuple of replies);
        # otherwise only the main instrument is queried.
        if query_mirrors:
            return tuple(instr.send_query(query_str) for instr in self._parent().all_devices)
        else:
            return self._parent().main_instrument.send_query(query_str)
    def _send_cmd(self, cmd_str, paranoia_level=None) -> Any:
        """Overwrite send_cmd for paranoia_level > 3"""
        if paranoia_level is None:
            paranoia_level = self._parent().paranoia_level
        if paranoia_level < 3:
            # NOTE(review): `self._parent().super()` calls a method named
            # `super` on the device instance — looks like a bug (probably
            # meant the inherited send_cmd); untested per the pragma. Confirm.
            self._parent().super().send_cmd(cmd_str=cmd_str, paranoia_level=paranoia_level) # pragma: no cover
        else:
            cmd_str = cmd_str.rstrip()
            # Append *OPC? and :SYST:ERR? so the instrument reports completion
            # and its error queue in the same round-trip as the command.
            if len(cmd_str) > 0:
                ask_str = cmd_str + "; *OPC?; :SYST:ERR?"
            else:
                ask_str = "*OPC?; :SYST:ERR?"
            *answers, opc, error_code_msg = self._parent()._visa_inst.ask(ask_str).split(";")
            error_code, error_msg = error_code_msg.split(",")
            error_code = int(error_code)
            if error_code != 0:
                # Clear status before deciding how to react to the error.
                _ = self._parent()._visa_inst.ask("*CLS; *OPC?")
                if error_code == -450:
                    # query queue overflow
                    self.send_cmd(cmd_str)
                else:
                    raise RuntimeError("Cannot execute command: {}\n{}: {}".format(cmd_str, error_code, error_msg))
            assert len(answers) == 0
class TaborChannelSynchronization(ChannelSynchronization):
    """This feature is used to synchronise a certain amount of channels."""
    def __init__(self, device: "TaborDevice"):
        super().__init__()
        # Weak back-reference to the owning device (avoids a reference cycle).
        self._parent = weakref.ref(device)

    def synchronize_channels(self, group_size: int) -> None:
        """
        Synchronize in groups of `group_size` channels. Groups of synchronized channels will be provided as
        AWGChannelTuples. The channel_size must be evenly dividable by the number of channels

        Args:
            group_size: Number of channels per channel tuple (2 or 4)

        Raises:
            TaborException: if group_size is neither 2 nor 4
        """
        if group_size == 2:
            self._parent()._channel_tuples = []
            # Integer division instead of the original C-style (int)(a / b)
            # cast: same result for these sizes, no float round-trip.
            for i in range(len(self._parent().channels) // group_size):
                self._parent()._channel_tuples.append(
                    TaborChannelTuple((i + 1),
                                      self._parent(),
                                      self._parent().channels[(i * group_size):((i * group_size) + group_size)],
                                      self._parent().marker_channels[(i * group_size):((i * group_size) + group_size)])
                )
            self._parent()[SCPI].send_cmd(":INST:COUP:STAT OFF")
        elif group_size == 4:
            # All four channels form one coupled tuple.
            self._parent()._channel_tuples = [TaborChannelTuple(1,
                                                                self._parent(),
                                                                self._parent().channels,
                                                                self._parent().marker_channels)]
            self._parent()[SCPI].send_cmd(":INST:COUP:STAT ON")
        else:
            raise TaborException("Invalid group size")
class TaborDeviceControl(DeviceControl):
    """This feature is used for basic communication with a AWG"""
    def __init__(self, device: "TaborDevice"):
        super().__init__()
        # Weak back-reference to the owning device (avoids a reference cycle).
        self._parent = weakref.ref(device)
    def reset(self) -> None:
        """
        Resetting the whole device. A command for resetting is send to the Device, the device is initialized again and
        all channel tuples are cleared.
        """
        self._parent()[SCPI].send_cmd(":RES")
        # Forget the cached coupling state; it will be re-queried lazily.
        self._parent()._coupled = None
        self._parent()._initialize()
        for channel_tuple in self._parent().channel_tuples:
            channel_tuple[TaborProgramManagement].clear()
    def trigger(self) -> None:
        """
        This method triggers a device remotely.
        """
        self._parent()[SCPI].send_cmd(":TRIG")
class TaborStatusTable(StatusTable):
    def __init__(self, device: "TaborDevice"):
        super().__init__()
        # Strong reference is fine here: the feature is owned by the device
        # only for its lifetime and is used for diagnostics.
        self._parent = device
    def get_status_table(self) -> Dict[str, Union[str, float, int]]:
        """
        Send a lot of queries to the AWG about its settings. A good way to visualize is using pandas.DataFrame

        Returns:
            An ordered dictionary with the results
        """
        # (column name, SCPI query, result parser) — queried once per channel.
        name_query_type_list = [("channel", ":INST:SEL?", int),
                                ("coupling", ":OUTP:COUP?", str),
                                ("volt_dc", ":SOUR:VOLT:LEV:AMPL:DC?", float),
                                ("volt_hv", ":VOLT:HV?", float),
                                ("offset", ":VOLT:OFFS?", float),
                                ("outp", ":OUTP?", str),
                                ("mode", ":SOUR:FUNC:MODE?", str),
                                ("shape", ":SOUR:FUNC:SHAPE?", str),
                                ("dc_offset", ":SOUR:DC?", float),
                                ("freq_rast", ":FREQ:RAST?", float),
                                ("gated", ":INIT:GATE?", str),
                                ("continuous", ":INIT:CONT?", str),
                                ("continuous_enable", ":INIT:CONT:ENAB?", str),
                                ("continuous_source", ":INIT:CONT:ENAB:SOUR?", str),
                                ("marker_source", ":SOUR:MARK:SOUR?", str),
                                ("seq_jump_event", ":SOUR:SEQ:JUMP:EVEN?", str),
                                ("seq_adv_mode", ":SOUR:SEQ:ADV?", str),
                                ("aseq_adv_mode", ":SOUR:ASEQ:ADV?", str),
                                ("marker", ":SOUR:MARK:SEL?", int),
                                ("marker_high", ":MARK:VOLT:HIGH?", str),
                                ("marker_low", ":MARK:VOLT:LOW?", str),
                                ("marker_width", ":MARK:WIDT?", int),
                                ("marker_state", ":MARK:STAT?", str)]
        data = OrderedDict((name, []) for name, *_ in name_query_type_list)
        for ch in (1, 2, 3, 4):
            # Select the channel (and its marker) so the queries above are
            # answered in the context of channel `ch`.
            self._parent.channels[ch - 1]._select()
            self._parent.marker_channels[(ch - 1) % 2]._select()
            for name, query, dtype in name_query_type_list:
                data[name].append(dtype(self._parent[SCPI].send_query(query)))
        return data
# Implementation
class TaborDevice(AWGDevice):
    """Driver for a 4-channel Tabor AWG, optionally shadowed by mirror
    devices (e.g. a simulator) that receive the same commands."""
    def __init__(self, device_name: str, instr_addr=None, paranoia_level=1, external_trigger=False, reset=False,
                 mirror_addresses=()):
        """
        Constructor for a Tabor device
        Args:
            device_name (str): Name of the device
            instr_addr: Instrument address that is forwarded to tabor_control
            paranoia_level (int): Paranoia level that is forwarded to tabor_control
            external_trigger (bool): Not supported yet
            reset (bool): If True, send ":RES" to the instrument before setup
            mirror_addresses: list of devices on which the same things as on the main device are done.
                For example you can use a simulator and a real device at once
        """
        super().__init__(device_name)
        self._instr = tabor_control.device.TEWXAwg(tabor_control.open_session(instr_addr), paranoia_level)
        self._mirrors = tuple(tabor_control.device.TEWXAwg(tabor_control.open_session(address), paranoia_level)
                              for address in mirror_addresses)
        # Cached coupling state; None means "unknown, query the instrument".
        self._coupled = None
        self._clock_marker = [0, 0, 0, 0]
        self.add_feature(TaborSCPI(self, self.main_instrument._visa_inst))
        self.add_feature(TaborDeviceControl(self))
        self.add_feature(TaborStatusTable(self))
        if reset:
            self[SCPI].send_cmd(":RES")
        # Channel
        self._channels = [TaborChannel(i + 1, self) for i in range(4)]
        # MarkerChannels
        self._marker_channels = [TaborMarkerChannel(i + 1, self) for i in range(4)]
        self._initialize()
        # ChannelTuple
        self._channel_tuples = []
        self.add_feature(TaborChannelSynchronization(self))
        # Default grouping: two channel tuples of two channels each.
        self[TaborChannelSynchronization].synchronize_channels(2)
        if external_trigger:
            raise NotImplementedError()  # pragma: no cover
    def enable(self) -> None:
        """
        This method immediately generates the selected output waveform, if the device is in continuous and armed
        repetition mode.
        """
        self[SCPI].send_cmd(":ENAB")
    def abort(self) -> None:
        """
        With abort you can terminate the current generation of the output waveform. When the output waveform is
        terminated the output starts generating an idle waveform.
        """
        self[SCPI].send_cmd(":ABOR")
    def set_coupled(self, coupled: bool) -> None:
        """
        Sets the coupling of the device to 'coupled'.
        """
        # NOTE(review): self._coupled is NOT updated here, so _is_coupled()
        # may keep returning a stale cached value afterwards — confirm intended.
        if coupled:
            self[SCPI].send_cmd("INST:COUP:STAT ON")
        else:
            self[SCPI].send_cmd("INST:COUP:STAT OFF")
    def _is_coupled(self) -> bool:
        """
        Returns true if the coupling of the device is 'coupled' otherwise false
        """
        if self._coupled is None:
            # NOTE(review): assumes the instrument literally answers "ON";
            # some firmwares answer "1"/"0" — verify against the hardware.
            return self[SCPI].send_query(":INST:COUP:STAT?") == "ON"
        else:
            return self._coupled
    def cleanup(self) -> None:
        # Delegate cleanup (programs/segments) to every channel tuple.
        for channel_tuple in self.channel_tuples:
            channel_tuple.cleanup()
    @property
    def channels(self) -> Collection["TaborChannel"]:
        """Returns a list of all channels of a Device"""
        return self._channels
    @property
    def marker_channels(self) -> Collection["TaborMarkerChannel"]:
        """Returns a list of all marker channels of a device. The collection may be empty"""
        return self._marker_channels
    @property
    def channel_tuples(self) -> Collection["TaborChannelTuple"]:
        """Returns a list of all channel tuples of a list"""
        return self._channel_tuples
    @property
    def main_instrument(self) -> tabor_control.device.TEWXAwg:
        return self._instr
    @property
    def mirrored_instruments(self) -> Sequence[tabor_control.device.TEWXAwg]:
        return self._mirrors
    @property
    def all_devices(self) -> Sequence[tabor_control.device.TEWXAwg]:
        # Main instrument first, then mirrors — command fan-out preserves order.
        return (self._instr,) + self._mirrors
    @property
    def _paranoia_level(self) -> tabor_control.ParanoiaLevel:
        return self._instr.paranoia_level
    @_paranoia_level.setter
    def _paranoia_level(self, val):
        # Keep mirrors in lock-step with the main instrument.
        for instr in self.all_devices:
            instr.paranoia_level = val
    @property
    def dev_properties(self) -> dict:
        return self._instr.dev_properties.as_dict()
    def _send_binary_data(self, bin_dat, paranoia_level=None):
        # Segment sample data goes to every device (main + mirrors).
        for instr in self.all_devices:
            instr.write_segment_data(bin_dat, paranoia_level=paranoia_level)
    def _download_segment_lengths(self, seg_len_list, paranoia_level=None):
        for instr in self.all_devices:
            instr.write_segment_lengths(seg_len_list, paranoia_level=paranoia_level)
    def _download_sequencer_table(self, seq_table, paranoia_level=None):
        for instr in self.all_devices:
            instr.write_sequencer_table(seq_table, paranoia_level=paranoia_level)
    def _download_adv_seq_table(self, seq_table, paranoia_level=None):
        for instr in self.all_devices:
            instr.write_advanced_sequencer_table(seq_table, paranoia_level=paranoia_level)
    def _initialize(self) -> None:
        # 1. Select channel
        # 2. Turn off gated mode
        # 3. Turn on continous mode
        # 4. Armed mode (only generate waveforms after enab command)
        # 5. Expect enable signal from (USB / LAN / GPIB)
        # 6. Use arbitrary waveforms as marker source
        # 7. Expect jump command for sequencing from (USB / LAN / GPIB)
        setup_command = (
            ":INIT:GATE OFF; :INIT:CONT ON; "
            ":INIT:CONT:ENAB ARM; :INIT:CONT:ENAB:SOUR BUS;"
            ":SOUR:MARK:SOUR USER; :SOUR:SEQ:JUMP:EVEN BUS ")
        # Channels 1 and 3 lead their respective channel pairs, so
        # configuring them configures both pairs.
        self[SCPI].send_cmd(":INST:SEL 1")
        self[SCPI].send_cmd(setup_command)
        self[SCPI].send_cmd(":INST:SEL 3")
        self[SCPI].send_cmd(setup_command)
    def _get_readable_device(self, simulator=True) -> tabor_control.device.TEWXAwg:
        """
        A method to get the first readable device out of all devices.
        A readable device is a device which you can read data from like a simulator.

        Returns:
            The first readable device out of all devices

        Throws:
            TaborException: this exception is thrown if there is no readable device in the list of all devices
        """
        for device in self.all_devices:
            if device.supports_basic_reading():
                # With simulator=True only a simulator qualifies; otherwise
                # any device with read support is accepted.
                if simulator:
                    if device.is_simulator:
                        return device
                else:
                    return device
        raise TaborException("No device capable of device data read")
########################################################################################################################
# Channel
########################################################################################################################
# Features
class TaborVoltageRange(VoltageRange):
    """Voltage-range feature of a single Tabor channel: offset/amplitude
    queries and the amplitude-offset handling policy."""
    def __init__(self, channel: "TaborChannel"):
        super().__init__()
        # Weak back-reference to the owning channel (avoids a reference cycle).
        self._parent = weakref.ref(channel)

    @property
    @with_select
    def offset(self) -> float:
        """Get offset of AWG channel"""
        # ":VOLT:OFFS?" addresses the currently selected channel, which
        # @with_select guarantees is this one.  (The original carried a no-op
        # .format(channel=...) call on a placeholder-free string — removed.)
        return float(
            self._parent().device[SCPI].send_query(":VOLT:OFFS?"))

    @property
    @with_select
    def amplitude(self) -> float:
        """Get amplitude of AWG channel"""
        # The applicable amplitude query depends on the output coupling mode.
        coupling = self._parent().device[SCPI].send_query(":OUTP:COUP?")
        if coupling == "DC":
            return float(self._parent().device[SCPI].send_query(":VOLT?"))
        elif coupling == "HV":
            return float(self._parent().device[SCPI].send_query(":VOLT:HV?"))
        else:
            raise TaborException("Unknown coupling: {}".format(coupling))

    @property
    def amplitude_offset_handling(self) -> AmplitudeOffsetHandling:
        """
        Gets the amplitude and offset handling of this channel. The amplitude-offset controls if the amplitude and
        offset settings are constant or if these should be optimized by the driver
        """
        return self._parent()._amplitude_offset_handling

    @amplitude_offset_handling.setter
    def amplitude_offset_handling(self, amp_offs_handling: Union[AmplitudeOffsetHandling, str]) -> None:
        """
        amp_offs_handling: See possible values at `AWGAmplitudeOffsetHandling`
        """
        # BUG FIX: the original called AmplitudeOffsetHandling(AmplitudeOffsetHandling),
        # i.e. passed the enum class to itself and ignored the argument.
        # Coerce the given value (enum member or its string) into the enum.
        amp_offs_handling = AmplitudeOffsetHandling(amp_offs_handling)
        self._parent()._amplitude_offset_handling = amp_offs_handling

    def _select(self) -> None:
        self._parent()._select()
class TaborActivatableChannels(ActivatableChannels):
    """Output-enable feature of a single Tabor channel."""
    def __init__(self, channel: "TaborChannel"):
        super().__init__()
        # Weak back-reference to the owning channel (avoids a reference cycle).
        self._parent = weakref.ref(channel)

    @property
    def enabled(self) -> bool:
        """
        Returns the state a channel has at the moment. A channel is either activated or deactivated
        True stands for activated and false for deactivated
        """
        # NOTE(review): the query string ":OUTP ?" (with a space) and the
        # literal "ON" comparison are kept from the original — confirm the
        # instrument does not answer "1"/"0" instead.
        return self._parent().device[SCPI].send_query(":OUTP ?") == "ON"

    @with_select
    def enable(self):
        """Enables the output of a certain channel"""
        # The command applies to the currently selected channel (@with_select).
        # The original's .format(ch_id=...) on a placeholder-free string was a
        # no-op and has been dropped.
        self._parent().device[SCPI].send_cmd(":OUTP ON")

    @with_select
    def disable(self):
        """Disables the output of a certain channel"""
        self._parent().device[SCPI].send_cmd(":OUTP OFF")

    def _select(self) -> None:
        self._parent()._select()
# Implementation
class TaborChannel(AWGChannel):
    """One analog output channel of a TaborDevice, exposing voltage-range
    and output-enable behaviour as features."""
    def __init__(self, idn: int, device: TaborDevice):
        super().__init__(idn)
        # Weak back-reference; the device owns the channel, not vice versa.
        self._device = weakref.ref(device)
        # Default policy: ignore the channel offset when computing waveforms.
        self._amplitude_offset_handling = AmplitudeOffsetHandling.IGNORE_OFFSET
        # adding Features
        self.add_feature(TaborVoltageRange(self))
        self.add_feature(TaborActivatableChannels(self))
    @property
    def device(self) -> TaborDevice:
        """Returns the device that the channel belongs to"""
        return self._device()
    @property
    def channel_tuple(self) -> "TaborChannelTuple":
        """Returns the channel tuple that this channel belongs to"""
        return self._channel_tuple()
    def _set_channel_tuple(self, channel_tuple: "TaborChannelTuple") -> None:
        """
        The channel tuple "channel_tuple" is assigned to this channel
        Args:
            channel_tuple (TaborChannelTuple): the channel tuple that this channel belongs to
        """
        # Stored as a weakref; channel_tuple property dereferences it.
        self._channel_tuple = weakref.ref(channel_tuple)
    def _select(self) -> None:
        # Make this channel the instrument's selected channel so subsequent
        # channel-scoped SCPI commands apply to it.
        self.device[SCPI].send_cmd(":INST:SEL {channel}".format(channel=self.idn))
########################################################################################################################
# ChannelTuple
########################################################################################################################
# Features
class TaborProgramManagement(ProgramManagement):
def __init__(self, channel_tuple: "TaborChannelTuple"):
super().__init__(channel_tuple)
self._programs = {}
self._armed_program = None
self._idle_sequence_table = [(1, 1, 0), (1, 1, 0), (1, 1, 0)]
self._trigger_source = 'BUS'
def get_repetition_mode(self, program_name: str) -> str:
"""
Returns the default repetition mode of a certain program
Args:
program_name (str): name of the program whose repetition mode should be returned
"""
return self._channel_tuple._known_programs[program_name].program._repetition_mode
def set_repetition_mode(self, program_name: str, repetition_mode: str) -> None:
"""
Changes the default repetition mode of a certain program
Args:
program_name (str): name of the program whose repetition mode should be changed
Throws:
ValueError: this Exception is thrown when an invalid repetition mode is given
"""
if repetition_mode in ("infinite", "once"):
self._channel_tuple._known_programs[program_name].program._repetition_mode = repetition_mode
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
@property
def supported_repetition_modes(self) -> Set[RepetitionMode]:
return {RepetitionMode.INFINITE}
@with_configuration_guard
@with_select
def upload(self, name: str,
program: Loop,
channels: Tuple[Optional[ChannelID], Optional[ChannelID]],
marker_channels: Tuple[Optional[ChannelID], Optional[ChannelID]],
voltage_transformation: Tuple[Callable, Callable],
repetition_mode: str = None,
force: bool = False) -> None:
"""
Upload a program to the AWG.
The policy is to prefer amending the unknown waveforms to overwriting old ones.
"""
if repetition_mode is None:
repetition_mode = self._default_repetition_mode
else:
repetition_mode = RepetitionMode(repetition_mode)
if repetition_mode not in self.supported_repetition_modes:
raise ValueError(f"{repetition_mode} is not supported on {self._channel_tuple}")
if len(channels) != len(self._channel_tuple.channels):
raise ValueError("Wrong number of channels")
if len(marker_channels) != len(self._channel_tuple.marker_channels):
raise ValueError("Wrong number of marker")
if len(voltage_transformation) != len(self._channel_tuple.channels):
raise ValueError("Wrong number of voltage transformations")
# adjust program to fit criteria
sample_rate = self._channel_tuple.device.channel_tuples[0].sample_rate
make_compatible(program,
minimal_waveform_length=192,
waveform_quantum=16,
sample_rate=sample_rate / 10 ** 9)
if name in self._channel_tuple._known_programs:
if force:
self._channel_tuple.free_program(name)
else:
raise ValueError('{} is already known on {}'.format(name, self._channel_tuple.idn))
# They call the peak to peak range amplitude
ranges = tuple(ch[VoltageRange].amplitude for ch in self._channel_tuple.channels)
voltage_amplitudes = tuple(range / 2 for range in ranges)
voltage_offsets = []
for channel in self._channel_tuple.channels:
if channel._amplitude_offset_handling == AmplitudeOffsetHandling.IGNORE_OFFSET:
voltage_offsets.append(0)
elif channel._amplitude_offset_handling == AmplitudeOffsetHandling.CONSIDER_OFFSET:
voltage_offsets.append(channel[VoltageRange].offset)
else:
raise NotImplementedError(
'{} is invalid as AWGAmplitudeOffsetHandling'.format(channel._amplitude_offset_handling))
voltage_offsets = tuple(voltage_offsets)
# parse to tabor program
tabor_program = TaborProgram(program,
channels=tuple(channels),
markers=marker_channels,
device_properties=self._channel_tuple.device.dev_properties,
sample_rate=sample_rate / 10 ** 9,
amplitudes=voltage_amplitudes,
offsets=voltage_offsets,
voltage_transformations=voltage_transformation)
segments, segment_lengths = tabor_program.get_sampled_segments()
waveform_to_segment, to_amend, to_insert = self._channel_tuple._find_place_for_segments_in_memory(segments,
segment_lengths)
self._channel_tuple._segment_references[waveform_to_segment[waveform_to_segment >= 0]] += 1
for wf_index in np.flatnonzero(to_insert > 0):
segment_index = to_insert[wf_index]
self._channel_tuple._upload_segment(to_insert[wf_index], segments[wf_index])
waveform_to_segment[wf_index] = segment_index
if np.any(to_amend):
segments_to_amend = [segments[idx] for idx in np.flatnonzero(to_amend)]
waveform_to_segment[to_amend] = self._channel_tuple._amend_segments(segments_to_amend)
self._channel_tuple._known_programs[name] = TaborProgramMemory(waveform_to_segment=waveform_to_segment,
program=tabor_program)
# set the default repetionmode for a programm
self.set_repetition_mode(program_name=name, repetition_mode=repetition_mode)
def remove(self, name: str) -> None:
"""
Remove a program from the AWG.
Also discards all waveforms referenced only by the program identified by name.
Args:
name (str): The name of the program to remove.
"""
self._channel_tuple.free_program(name)
self._channel_tuple.cleanup()
def clear(self) -> None:
"""
Removes all programs and waveforms from the AWG.
Caution: This affects all programs and waveforms on the AWG, not only those uploaded using qupulse!
"""
self._channel_tuple.device.channels[0]._select()
self._channel_tuple.device[SCPI].send_cmd(":TRAC:DEL:ALL")
self._channel_tuple.device[SCPI].send_cmd(":SOUR:SEQ:DEL:ALL")
self._channel_tuple.device[SCPI].send_cmd(":ASEQ:DEL")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:DEF 1, 192")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:SEL 1")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:MODE COMB")
self._channel_tuple.device._send_binary_data(bin_dat=self._channel_tuple._idle_segment.get_as_binary())
self._channel_tuple._segment_lengths = 192 * np.ones(1, dtype=np.uint32)
self._channel_tuple._segment_capacity = 192 * np.ones(1, dtype=np.uint32)
self._channel_tuple._segment_hashes = np.ones(1, dtype=np.int64) * hash(self._channel_tuple._idle_segment)
self._channel_tuple._segment_references = np.ones(1, dtype=np.uint32)
self._channel_tuple._advanced_sequence_table = []
self._channel_tuple._sequencer_tables = []
self._channel_tuple._known_programs = dict()
self._change_armed_program(None)
@with_select
def arm(self, name: Optional[str]) -> None:
"""
Load the program 'name' and arm the device for running it.
Args:
name (str): the program the device should change to
"""
if self._channel_tuple._current_program == name:
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL 1")
else:
self._change_armed_program(name)
@property
def programs(self) -> Set[str]:
"""The set of program names that can currently be executed on the hardware AWG."""
return set(program.name for program in self._channel_tuple._known_programs.keys())
@with_select
def run_current_program(self) -> None:
"""
This method starts running the active program
Throws:
RuntimeError: This exception is thrown if there is no active program for this device
"""
if (self._channel_tuple.device._is_coupled()):
# channel tuple is the first channel tuple
if (self._channel_tuple.device._channel_tuples[0] == self):
if self._channel_tuple._current_program:
repetition_mode = self._channel_tuple._known_programs[
self._channel_tuple._current_program].program._repetition_mode
if repetition_mode == "infinite":
self._cont_repetition_mode()
self._channel_tuple.device[SCPI].send_cmd(':TRIG',
paranoia_level=self._channel_tuple.internal_paranoia_level)
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
else:
raise RuntimeError("No program active")
else:
warnings.warn(
"TaborWarning - run_current_program() - the device is coupled - runthe program via the first channel tuple")
else:
if self._channel_tuple._current_program:
repetition_mode = self._channel_tuple._known_programs[
self._channel_tuple._current_program].program._repetition_mode
if repetition_mode == "infinite":
self._cont_repetition_mode()
self._channel_tuple.device[SCPI].send_cmd(':TRIG', paranoia_level=self._channel_tuple.internal_paranoia_level)
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
else:
raise RuntimeError("No program active")
@with_select
@with_configuration_guard
def _change_armed_program(self, name: Optional[str]) -> None:
"""The armed program of the channel tuple is changed to the program with the name 'name'"""
if name is None:
sequencer_tables = [self._idle_sequence_table]
advanced_sequencer_table = [(1, 1, 0)]
else:
waveform_to_segment_index, program = self._channel_tuple._known_programs[name]
waveform_to_segment_number = waveform_to_segment_index + 1
# translate waveform number to actual segment
sequencer_tables = [[(rep_count, waveform_to_segment_number[wf_index], jump_flag)
for ((rep_count, wf_index, jump_flag), _) in sequencer_table]
for sequencer_table in program.get_sequencer_tables()]
# insert idle sequence
sequencer_tables = [self._idle_sequence_table] + sequencer_tables
# adjust advanced sequence table entries by idle sequence table offset
advanced_sequencer_table = [(rep_count, seq_no + 1, jump_flag)
for rep_count, seq_no, jump_flag in program.get_advanced_sequencer_table()]
if program.waveform_mode == TaborSequencing.SINGLE:
assert len(advanced_sequencer_table) == 1
assert len(sequencer_tables) == 2
while len(sequencer_tables[1]) < self._channel_tuple.device.dev_properties["min_seq_len"]:
assert advanced_sequencer_table[0][0] == 1
sequencer_tables[1].append((1, 1, 0))
# insert idle sequence in advanced sequence table
advanced_sequencer_table = [(1, 1, 0)] + advanced_sequencer_table
while len(advanced_sequencer_table) < self._channel_tuple.device.dev_properties["min_aseq_len"]:
advanced_sequencer_table.append((1, 1, 0))
self._channel_tuple.device[SCPI].send_cmd("SEQ:DEL:ALL", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple._sequencer_tables = []
self._channel_tuple.device[SCPI].send_cmd("ASEQ:DEL", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple._advanced_sequence_table = []
# download all sequence tables
for i, sequencer_table in enumerate(sequencer_tables):
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL {}".format(i + 1),
paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple.device._download_sequencer_table(sequencer_table)
self._channel_tuple._sequencer_tables = sequencer_tables
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL 1", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple.device._download_adv_seq_table(advanced_sequencer_table)
self._channel_tuple._advanced_sequence_table = advanced_sequencer_table
self._channel_tuple._current_program = name
def _select(self):
self._channel_tuple.channels[0]._select()
    @property
    def _configuration_guard_count(self):
        # Delegates to the owning channel tuple's reentrant configuration-mode counter.
        return self._channel_tuple._configuration_guard_count
    @_configuration_guard_count.setter
    def _configuration_guard_count(self, configuration_guard_count):
        # Forward the new guard count to the owning channel tuple.
        self._channel_tuple._configuration_guard_count = configuration_guard_count
    def _enter_config_mode(self):
        # Pure delegation: the channel tuple owns the configuration-mode state machine.
        self._channel_tuple._enter_config_mode()
    def _exit_config_mode(self):
        # Pure delegation: the channel tuple owns the configuration-mode state machine.
        self._channel_tuple._exit_config_mode()
@with_select
def _cont_repetition_mode(self):
"""Changes the run mode of this channel tuple to continous mode"""
self._channel_tuple.device[SCPI].send_cmd(f":TRIG:SOUR:ADV EXT")
self._channel_tuple.device[SCPI].send_cmd(
f":INIT:GATE OFF; :INIT:CONT ON; :INIT:CONT:ENAB ARM; :INIT:CONT:ENAB:SOUR {self._trigger_source}")
class TaborVolatileParameters(VolatileParameters):
    """Feature that pushes updated values of volatile parameters into the device's
    (advanced) sequence tables for the owning channel tuple."""
    def __init__(self, channel_tuple: "TaborChannelTuple", ):
        super().__init__(channel_tuple=channel_tuple)
    def set_volatile_parameters(self, program_name: str, parameters: Mapping[str, numbers.Number]) -> None:
        """ Set the values of parameters which were marked as volatile on program creation. Sets volatile parameters
        in program memory and device's (adv.) sequence tables if program is current program.
        If set_volatile_parameters needs to run faster, set CONFIG_MODE_PARANOIA_LEVEL to 0 which causes the device to
        enter the configuration mode with paranoia level 0 (Note: paranoia level 0 does not work for the simulator)
        and set device._is_coupled.
        Args:
            program_name: Name of program which should be changed.
            parameters: Names of volatile parameters and respective values to which they should be set.
        """
        waveform_to_segment_index, program = self._channel_tuple._known_programs[program_name]
        # Update the in-memory program first; `modifications` maps table positions to new entries.
        modifications = program.update_volatile_parameters(parameters)
        self._channel_tuple.logger.debug("parameter modifications: %r" % modifications)
        if not modifications:
            self._channel_tuple.logger.info(
                "There are no volatile parameters to update. Either there are no volatile parameters with "
                "these names,\nthe respective repetition counts already have the given values or the "
                "volatile parameters were dropped during upload.")
            return
        # Only touch the hardware if the modified program is the one currently armed.
        if program_name == self._channel_tuple._current_program:
            commands = []
            for position, entry in modifications.items():
                if not entry.repetition_count > 0:
                    raise ValueError("Repetition must be > 0")
                if isinstance(position, int):
                    # int position -> advanced sequencer table; device entries are 1-indexed.
                    commands.append(":ASEQ:DEF {},{},{},{}".format(position + 1, entry.element_number + 1,
                                                                   entry.repetition_count, entry.jump_flag))
                else:
                    # (table, step) position -> regular sequencer table.
                    # +2: table 1 on the device holds the idle sequence, and tables are 1-indexed.
                    table_num, step_num = position
                    commands.append(":SEQ:SEL {}".format(table_num + 2))
                    commands.append(":SEQ:DEF {},{},{},{}".format(step_num,
                                                                  waveform_to_segment_index[entry.element_id] + 1,
                                                                  entry.repetition_count, entry.jump_flag))
            self._channel_tuple._execute_multiple_commands_with_config_guard(commands)
        # Wait until AWG is finished
        _ = self._channel_tuple.device.main_instrument._visa_inst.query("*OPC?")
class TaborReadProgram(ReadProgram):
    """Feature that reads the program currently held in device memory back as a
    :class:`PlottableProgram`."""
    def __init__(self, channel_tuple: "TaborChannelTuple", ):
        super().__init__(channel_tuple=channel_tuple)
    def read_complete_program(self):
        """Assemble a PlottableProgram from the tuple's waveform and table memory."""
        tup = self._channel_tuple
        waveforms = tup.read_waveforms()
        sequence_tables = tup.read_sequence_tables()
        advanced_table = tup.read_advanced_sequencer_table()
        return PlottableProgram.from_read_data(waveforms, sequence_tables, advanced_table)
# Implementation
class TaborChannelTuple(AWGChannelTuple):
    """A pair of Tabor AWG channels (plus their marker channels) that share
    waveform/sequence memory and are programmed together."""
    # Paranoia level used when entering configuration mode; None = device default.
    CONFIG_MODE_PARANOIA_LEVEL = None
    def __init__(self, idn: int, device: TaborDevice, channels: Iterable["TaborChannel"],
                 marker_channels: Iterable["TaborMarkerChannel"]):
        super().__init__(idn)
        # weak reference avoids a cycle between device and tuple
        self._device = weakref.ref(device)
        self._configuration_guard_count = 0
        self._is_in_config_mode = False
        self._channels = tuple(channels)
        self._marker_channels = tuple(marker_channels)
        # the channel and channel marker are assigned to this channel tuple
        for channel in self.channels:
            channel._set_channel_tuple(self)
        for marker_ch in self.marker_channels:
            marker_ch._set_channel_tuple(self)
        # adding Features
        self.add_feature(TaborProgramManagement(self))
        self.add_feature(TaborVolatileParameters(self))
        # 192-point all-zero segment used to park the outputs between programs
        self._idle_segment = TaborSegment.from_sampled(voltage_to_uint16(voltage=np.zeros(192),
                                                                         output_amplitude=0.5,
                                                                         output_offset=0., resolution=14),
                                                       voltage_to_uint16(voltage=np.zeros(192),
                                                                         output_amplitude=0.5,
                                                                         output_offset=0., resolution=14),
                                                       None, None)
        self._known_programs = dict()  # type: Dict[str, TaborProgramMemory]
        self._current_program = None
        # Per-segment bookkeeping arrays; populated by clear()/upload paths.
        self._segment_lengths = None
        self._segment_capacity = None
        self._segment_hashes = None
        self._segment_references = None
        self._sequencer_tables = None
        self._advanced_sequence_table = None
        self._internal_paranoia_level = 0
        self[TaborProgramManagement].clear()
        # BUGFIX: this was a bare annotation (`self._channel_tuple_adapter: ChannelTupleAdapter`)
        # which never created the attribute, so the `channel_tuple_adapter` property raised
        # AttributeError on first access instead of lazily constructing the adapter.
        self._channel_tuple_adapter: Optional[ChannelTupleAdapter] = None
    @property
    def internal_paranoia_level(self) -> Optional[int]:
        """Paranoia level applied to commands this tuple sends (0 = no extra checking)."""
        return self._internal_paranoia_level
    @property
    def logger(self):
        """Logger shared by all qupulse Tabor components."""
        return logging.getLogger("qupulse.tabor")
    @property
    def channel_tuple_adapter(self) -> ChannelTupleAdapter:
        """Lazily constructed adapter exposing the legacy AWG interface for this tuple."""
        if self._channel_tuple_adapter is None:
            self._channel_tuple_adapter = ChannelTupleAdapter(self)
        return self._channel_tuple_adapter
    def _select(self) -> None:
        """The channel tuple is selected, which means that the first channel of the channel tuple is selected"""
        self.channels[0]._select()
    @property
    def device(self) -> TaborDevice:
        """Returns the device that the channel tuple belongs to"""
        return self._device()
    @property
    def channels(self) -> Collection["TaborChannel"]:
        """Returns all channels of the channel tuple"""
        return self._channels
    @property
    def marker_channels(self) -> Collection["TaborMarkerChannel"]:
        """Returns all marker channels of the channel tuple"""
        return self._marker_channels
    @property
    @with_select
    def sample_rate(self) -> TimeType:
        """Returns the sample rate that the channels of a channel tuple have"""
        # NOTE: the query string previously carried a no-op .format(channel=...) call
        # although ":FREQ:RAST?" contains no placeholders; the dead call was removed.
        return TimeType.from_float(
            float(self.device[SCPI].send_query(":FREQ:RAST?")))
    @property
    def total_capacity(self) -> int:
        """Total waveform memory in points available to this channel pair (half the device memory)."""
        return int(self.device.dev_properties["max_arb_mem"]) // 2
    def free_program(self, name: str) -> TaborProgramMemory:
        """Drop program `name` from the bookkeeping, decrement its segments' reference
        counts, and disarm it if it is the currently armed program."""
        if name is None:
            raise TaborException("Removing 'None' program is forbidden.")
        program = self._known_programs.pop(name)
        self._segment_references[program.waveform_to_segment] -= 1
        if self._current_program == name:
            self[TaborProgramManagement]._change_armed_program(None)
        return program
    @property
    def _segment_reserved(self) -> np.ndarray:
        # Boolean mask of segments still referenced by at least one program.
        return self._segment_references > 0
    @property
    def _free_points_in_total(self) -> int:
        return self.total_capacity - np.sum(self._segment_capacity[self._segment_reserved])
    @property
    def _free_points_at_end(self) -> int:
        # NOTE(review): this slices up to (excluding) the last reserved index, while
        # _find_place_for_segments_in_memory uses last+1 — possible off-by-one. It is
        # only used in error messages; confirm before changing.
        reserved_index = np.flatnonzero(self._segment_reserved)
        if len(reserved_index):
            return self.total_capacity - np.sum(self._segment_capacity[:reserved_index[-1]])
        else:
            return self.total_capacity
    @with_select
    def read_waveforms(self) -> List[np.ndarray]:
        """Read back every referenced waveform segment from the device (simulator only)."""
        device = self.device._get_readable_device(simulator=True)
        old_segment = device.send_query(":TRAC:SEL?")
        waveforms = []
        uploaded_waveform_indices = np.flatnonzero(
            self._segment_references) + 1
        for segment in uploaded_waveform_indices:
            device.send_cmd(":TRAC:SEL {}".format(segment), paranoia_level=self.internal_paranoia_level)
            waveforms.append(device.read_segment_data())
        # restore the previously selected segment
        device.send_cmd(":TRAC:SEL {}".format(old_segment), paranoia_level=self.internal_paranoia_level)
        return waveforms
    @with_select
    def read_sequence_tables(self) -> List[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
        """Read back all uploaded sequencer tables as (repeats, segment_no, jump_flag) triples."""
        device = self.device._get_readable_device(simulator=True)
        old_sequence = device.send_query(":SEQ:SEL?")
        sequences = []
        uploaded_sequence_indices = np.arange(len(self._sequencer_tables)) + 1
        for sequence in uploaded_sequence_indices:
            device.send_cmd(":SEQ:SEL {}".format(sequence), paranoia_level=self.internal_paranoia_level)
            table = device.read_sequencer_table()
            sequences.append((table['repeats'], table['segment_no'], table['jump_flag']))
        # restore the previously selected sequence
        device.send_cmd(":SEQ:SEL {}".format(old_sequence), paranoia_level=self.internal_paranoia_level)
        return sequences
    @with_select
    def read_advanced_sequencer_table(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Read back the advanced sequencer table as (repeats, segment_no, jump_flag)."""
        table = self.device._get_readable_device(simulator=True).read_advanced_sequencer_table()
        return table['repeats'], table['segment_no'], table['jump_flag']
    def read_complete_program(self) -> PlottableProgram:
        """Reconstruct the full program from device memory."""
        return PlottableProgram.from_read_data(self.read_waveforms(),
                                               self.read_sequence_tables(),
                                               self.read_advanced_sequencer_table())
    def _find_place_for_segments_in_memory(self, segments: Sequence, segment_lengths: np.ndarray) -> \
            Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Plan where the given segments go in device memory.

        Strategy:
        1. Find known segments (by hash).
        2. Find empty spaces with fitting length.
        3. Find empty spaces with bigger length.
        4. Amend remaining segments at the end of memory.

        Args:
            segments (Sequence): segments to place.
            segment_lengths (Sequence): length of each segment in points.
        Returns:
            (waveform_to_segment, to_amend, to_insert): target index per segment,
            mask of segments to append, and per-segment insert positions (-1 = none).
        """
        segment_hashes = np.fromiter((hash(segment) for segment in segments), count=len(segments), dtype=np.int64)
        waveform_to_segment = find_positions(self._segment_hashes, segment_hashes)
        # separate into known and unknown
        unknown = (waveform_to_segment == -1)
        known = ~unknown
        known_pos_in_memory = waveform_to_segment[known]
        assert len(known_pos_in_memory) == 0 or np.all(
            self._segment_hashes[known_pos_in_memory] == segment_hashes[known])
        new_reference_counter = self._segment_references.copy()
        new_reference_counter[known_pos_in_memory] += 1
        # +16: per-segment overhead in combined-wave format
        to_upload_size = np.sum(segment_lengths[unknown] + 16)
        free_points_in_total = self.total_capacity - np.sum(self._segment_capacity[self._segment_references > 0])
        if free_points_in_total < to_upload_size:
            raise MemoryError("Not enough free memory",
                              free_points_in_total,
                              to_upload_size,
                              self._free_points_in_total)
        to_amend = cast(np.ndarray, unknown)
        to_insert = np.full(len(segments), fill_value=-1, dtype=np.int64)
        reserved_indices = np.flatnonzero(new_reference_counter > 0)
        first_free = reserved_indices[-1] + 1 if len(reserved_indices) else 0
        free_segments = new_reference_counter[:first_free] == 0
        free_segment_count = np.sum(free_segments)
        # look for a free segment place with the same length
        for segment_idx in np.flatnonzero(to_amend):
            if free_segment_count == 0:
                break
            pos_of_same_length = np.logical_and(free_segments,
                                                segment_lengths[segment_idx] == self._segment_capacity[:first_free])
            idx_same_length = np.argmax(pos_of_same_length)
            if pos_of_same_length[idx_same_length]:
                free_segments[idx_same_length] = False
                free_segment_count -= 1
                to_amend[segment_idx] = False
                to_insert[segment_idx] = idx_same_length
        # try to find places that are larger than the segments to fit in starting with the large segments and large
        # free spaces
        segment_indices = np.flatnonzero(to_amend)[np.argsort(segment_lengths[to_amend])[::-1]]
        capacities = self._segment_capacity[:first_free]
        for segment_idx in segment_indices:
            free_capacities = capacities[free_segments]
            free_segments_indices = np.flatnonzero(free_segments)[np.argsort(free_capacities)[::-1]]
            if len(free_segments_indices) == 0:
                break
            fitting_segment = np.argmax((free_capacities >= segment_lengths[segment_idx])[::-1])
            fitting_segment = free_segments_indices[fitting_segment]
            if self._segment_capacity[fitting_segment] >= segment_lengths[segment_idx]:
                free_segments[fitting_segment] = False
                to_amend[segment_idx] = False
                to_insert[segment_idx] = fitting_segment
        free_points_at_end = self.total_capacity - np.sum(self._segment_capacity[:first_free])
        if np.sum(segment_lengths[to_amend] + 16) > free_points_at_end:
            raise MemoryError("Fragmentation does not allow upload.",
                              np.sum(segment_lengths[to_amend] + 16),
                              free_points_at_end,
                              self._free_points_at_end)
        return waveform_to_segment, to_amend, to_insert
    @with_select
    @with_configuration_guard
    def _upload_segment(self, segment_index: int, segment: TaborSegment) -> None:
        """Overwrite the (unreferenced) segment slot at `segment_index` with `segment`."""
        if self._segment_references[segment_index] > 0:
            raise ValueError("Reference count not zero")
        if segment.num_points > self._segment_capacity[segment_index]:
            raise ValueError("Cannot upload segment here.")
        segment_no = segment_index + 1  # device segments are 1-indexed
        self.device[TaborSCPI].send_cmd(":TRAC:DEF {}, {}".format(segment_no, segment.num_points),
                                        paranoia_level=self.internal_paranoia_level)
        self._segment_lengths[segment_index] = segment.num_points
        self.device[TaborSCPI].send_cmd(":TRAC:SEL {}".format(segment_no),
                                        paranoia_level=self.internal_paranoia_level)
        self.device[TaborSCPI].send_cmd(":TRAC:MODE COMB",
                                        paranoia_level=self.internal_paranoia_level)
        wf_data = segment.get_as_binary()
        self.device._send_binary_data(bin_dat=wf_data)
        self._segment_references[segment_index] = 1
        self._segment_hashes[segment_index] = hash(segment)
    @with_select
    @with_configuration_guard
    def _amend_segments(self, segments: List[TaborSegment]) -> np.ndarray:
        """Append `segments` at the end of device memory; returns their segment indices."""
        new_lengths = np.asarray([s.num_points for s in segments], dtype=np.uint32)
        wf_data = make_combined_wave(segments)
        trac_len = len(wf_data) // 2  # bytes -> 16-bit points
        segment_index = len(self._segment_capacity)
        first_segment_number = segment_index + 1
        self.device[TaborSCPI].send_cmd(":TRAC:DEF {},{}".format(first_segment_number, trac_len),
                                        paranoia_level=self.internal_paranoia_level)
        self.device[TaborSCPI].send_cmd(":TRAC:SEL {}".format(first_segment_number),
                                        paranoia_level=self.internal_paranoia_level)
        self.device[TaborSCPI].send_cmd(":TRAC:MODE COMB",
                                        paranoia_level=self.internal_paranoia_level)
        self.device._send_binary_data(bin_dat=wf_data)
        old_to_update = np.count_nonzero(self._segment_capacity != self._segment_lengths)
        segment_capacity = np.concatenate((self._segment_capacity, new_lengths))
        segment_lengths = np.concatenate((self._segment_lengths, new_lengths))
        segment_references = np.concatenate((self._segment_references, np.ones(len(segments), dtype=int)))
        segment_hashes = np.concatenate((self._segment_hashes, [hash(s) for s in segments]))
        # Choose the cheaper way to fix up segment definitions on the device:
        if len(segments) < old_to_update:
            for i, segment in enumerate(segments):
                current_segment_number = first_segment_number + i
                self.device[TaborSCPI].send_cmd(":TRAC:DEF {},{}".format(current_segment_number, segment.num_points),
                                                paranoia_level=self.internal_paranoia_level)
        else:
            # flush the capacity
            self.device._download_segment_lengths(segment_capacity)
            # update non fitting lengths
            for i in np.flatnonzero(segment_capacity != segment_lengths):
                self.device[SCPI].send_cmd(":TRAC:DEF {},{}".format(i + 1, segment_lengths[i]))
        self._segment_capacity = segment_capacity
        self._segment_lengths = segment_lengths
        self._segment_hashes = segment_hashes
        self._segment_references = segment_references
        return segment_index + np.arange(len(segments), dtype=np.int64)
    @with_select
    @with_configuration_guard
    def cleanup(self) -> None:
        """Discard all segments after the last which is still referenced"""
        reserved_indices = np.flatnonzero(self._segment_references > 0)
        old_end = len(self._segment_lengths)
        new_end = reserved_indices[-1] + 1 if len(reserved_indices) else 0
        self._segment_lengths = self._segment_lengths[:new_end]
        self._segment_capacity = self._segment_capacity[:new_end]
        self._segment_hashes = self._segment_hashes[:new_end]
        self._segment_references = self._segment_references[:new_end]
        try:
            # send max 10 commands at once
            chunk_size = 10
            for chunk_start in range(new_end, old_end, chunk_size):
                self.device[SCPI].send_cmd("; ".join("TRAC:DEL {}".format(i + 1)
                                                     for i in range(chunk_start, min(chunk_start + chunk_size, old_end))))
        except Exception as e:
            raise TaborUndefinedState("Error during cleanup. Device is in undefined state.", device=self) from e
    @with_configuration_guard
    def _execute_multiple_commands_with_config_guard(self, commands: List[str]) -> None:
        """ Joins the given commands into one and executes it with configuration guard.
        Args:
            commands: Commands that should be executed.
        """
        cmd_str = ";".join(commands)
        self.device[TaborSCPI].send_cmd(cmd_str, paranoia_level=self.internal_paranoia_level)
    def _enter_config_mode(self) -> None:
        """
        Enter the configuration mode if not already in. All outputs are set to the DC offset of the device and the
        sequencing is disabled. The manual states this speeds up sequence validation when uploading multiple sequences.
        When entering and leaving the configuration mode the AWG outputs a small (~60 mV in 4 V mode) blip.
        """
        if self._is_in_config_mode is False:
            # 1. Select channel pair
            # 2. Select DC as function shape
            # 3. Select built-in waveform mode
            if self.device._is_coupled():
                out_cmd = ":OUTP:ALL OFF"
            else:
                out_cmd = ""
                for channel in self.channels:
                    out_cmd = out_cmd + ":INST:SEL {ch_id}; :OUTP OFF;".format(ch_id=channel.idn)
            marker_0_cmd = ":SOUR:MARK:SEL 1;:SOUR:MARK:SOUR USER;:SOUR:MARK:STAT OFF"
            marker_1_cmd = ":SOUR:MARK:SEL 2;:SOUR:MARK:SOUR USER;:SOUR:MARK:STAT OFF"
            wf_mode_cmd = ":SOUR:FUNC:MODE FIX"
            cmd = ";".join([marker_0_cmd, marker_1_cmd, wf_mode_cmd])
            cmd = out_cmd + cmd
            self.device[TaborSCPI].send_cmd(cmd, paranoia_level=self.CONFIG_MODE_PARANOIA_LEVEL)
            self._is_in_config_mode = True
    @with_select
    def _exit_config_mode(self) -> None:
        """Leave the configuration mode. Enter advanced sequence mode and turn on all outputs"""
        if self.device._is_coupled():
            # Coupled -> switch all channels at once
            other_channel_tuple: TaborChannelTuple
            if self.channels == self.device.channel_tuples[0].channels:
                other_channel_tuple = self.device.channel_tuples[1]
            else:
                other_channel_tuple = self.device.channel_tuples[0]
            # Only re-enable outputs once both tuples have left config mode.
            if not other_channel_tuple._is_in_config_mode:
                self.device[SCPI].send_cmd(":SOUR:FUNC:MODE ASEQ")
                self.device[SCPI].send_cmd(":SEQ:SEL 1")
                self.device[SCPI].send_cmd(":OUTP:ALL ON")
        else:
            self.device[SCPI].send_cmd(":SOUR:FUNC:MODE ASEQ")
            self.device[SCPI].send_cmd(":SEQ:SEL 1")
            for channel in self.channels:
                channel[ActivatableChannels].enable()
            for marker_ch in self.marker_channels:
                marker_ch[ActivatableChannels].enable()
        self._is_in_config_mode = False
########################################################################################################################
# Marker Channel
########################################################################################################################
# Features
class TaborActivatableMarkerChannels(ActivatableChannels):
    """Feature that switches a single marker channel's output on and off."""
    def __init__(self, marker_channel: "TaborMarkerChannel"):
        super().__init__()
        # weak reference to avoid a cycle between marker channel and feature
        self._parent = weakref.ref(marker_channel)
    @property
    def enabled(self) -> bool:
        """
        Returns the state the marker channel has at the moment. A channel is either
        activated or deactivated; True stands for activated and False for deactivated.
        """
        return self._parent().device[SCPI].send_query(":MARK:STAT ?") == "ON"
    @with_select
    def enable(self):
        """Enables the output of this marker channel."""
        # The command was previously passed through str.format(channel=..., marker=...)
        # although it contains no placeholders; the dead formatting was removed.
        self._parent().device[SCPI].send_cmd("SOUR:MARK:SOUR USER; :SOUR:MARK:STAT ON")
    @with_select
    def disable(self):
        """Disables the output of this marker channel."""
        self._parent().device[SCPI].send_cmd(":SOUR:MARK:SOUR USER; :SOUR:MARK:STAT OFF")
    def _select(self) -> None:
        # Make this marker the device's active marker before commands are sent.
        self._parent()._select()
# Implementation
class TaborMarkerChannel(AWGMarkerChannel):
    """A single marker output of a Tabor device."""
    def __init__(self, idn: int, device: TaborDevice):
        super().__init__(idn)
        # weak reference avoids a cycle between device and marker channel
        self._device = weakref.ref(device)
        # adding Features
        self.add_feature(TaborActivatableMarkerChannels(self))
    @property
    def device(self) -> TaborDevice:
        """Returns the device that this marker channel belongs to"""
        return self._device()
    @property
    def channel_tuple(self) -> TaborChannelTuple:
        """Returns the channel tuple that this marker channel belongs to"""
        return self._channel_tuple()
    def _set_channel_tuple(self, channel_tuple: TaborChannelTuple) -> None:
        """
        Assign the channel tuple ``channel_tuple`` to this marker channel.
        Args:
            channel_tuple (TaborChannelTuple): the channel tuple that this marker channel belongs to
        """
        self._channel_tuple = weakref.ref(channel_tuple)
    def _select(self) -> None:
        """
        Select this marker channel so it becomes the active marker of the device.
        """
        # Markers come in pairs per channel: idn 1,2 -> channel 0; idn 3,4 -> channel 1; ...
        # Integer floor division replaces the former int((idn - 1) / 2) float round-trip.
        self.device.channels[(self.idn - 1) // 2]._select()
        self.device[SCPI].send_cmd(":SOUR:MARK:SEL {marker}".format(marker=(((self.idn - 1) % 2) + 1)))
class TaborUndefinedState(TaborException):
    """
    If this exception is raised the attached Tabor device is in an undefined state.
    It is highly recommended to reset it.
    """
    def __init__(self, *args, device: Union[TaborDevice, TaborChannelTuple]):
        super().__init__(*args)
        # keep a handle on the offending device so callers can recover via reset_device()
        self.device = device
    def reset_device(self):
        # Whole device: hardware reset. Channel tuple: drop stale segments and clear programs.
        if isinstance(self.device, TaborDevice):
            self.device[TaborDeviceControl].reset()
        elif isinstance(self.device, TaborChannelTuple):
            self.device.cleanup()
            self.device[TaborProgramManagement].clear()
| 43.316739
| 130
| 0.62175
|
4a08ad4762d18a398bb486749a9f0df7b877a8ba
| 811
|
py
|
Python
|
tests/unit/proxy/test_janus_graph_proxy.py
|
keyko-io/nevermined-amundsen-metadatalibrary
|
a615e6cb2e99e6fb6dacca03ff0adcdfcb2909bd
|
[
"Apache-2.0"
] | 1
|
2020-08-20T16:22:09.000Z
|
2020-08-20T16:22:09.000Z
|
tests/unit/proxy/test_janus_graph_proxy.py
|
keyko-io/nevermined-amundsen-metadatalibrary
|
a615e6cb2e99e6fb6dacca03ff0adcdfcb2909bd
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/proxy/test_janus_graph_proxy.py
|
keyko-io/nevermined-amundsen-metadatalibrary
|
a615e6cb2e99e6fb6dacca03ff0adcdfcb2909bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping
import unittest
from metadata_service.proxy.janus_graph_proxy import JanusGraphGremlinProxy
from .abstract_gremlin_proxy_tests import abstract_gremlin_proxy_test_class
class JanusGraphGremlinProxyTest(
        abstract_gremlin_proxy_test_class(), unittest.TestCase): # type: ignore
    """Runs the shared abstract gremlin proxy test suite against a JanusGraph backend."""
    def _create_gremlin_proxy(self, config: Mapping[str, Any]) -> JanusGraphGremlinProxy:
        # Don't use PROXY_HOST, PROXY_PORT, PROXY_PASSWORD. They might not be JanusGraph
        return JanusGraphGremlinProxy(host=config['JANUS_GRAPH_URL'])
# The abstract gremlin tests may not work locally, depending on setup.
# Remove the `del` below to run them against a live JanusGraph/Neptune instance.
del JanusGraphGremlinProxyTest
| 36.863636
| 89
| 0.802713
|
4a08b06ca536ddf236304b6ddec4fc627ee68719
| 2,082
|
py
|
Python
|
js2esi/tools/util.py
|
akamai/js2esi
|
324347603a73d713f2582f5d44fcbce80746b723
|
[
"Apache-2.0"
] | 14
|
2017-09-01T20:28:06.000Z
|
2022-02-18T04:17:10.000Z
|
js2esi/tools/util.py
|
akamai/js2esi
|
324347603a73d713f2582f5d44fcbce80746b723
|
[
"Apache-2.0"
] | null | null | null |
js2esi/tools/util.py
|
akamai/js2esi
|
324347603a73d713f2582f5d44fcbce80746b723
|
[
"Apache-2.0"
] | 1
|
2018-05-21T18:11:37.000Z
|
2018-05-21T18:11:37.000Z
|
import os
import sys
import platform
import subprocess
from contextlib import contextmanager
from js2esi import version
__author__ = "Colin Bendell"
__copyright__ = "Copyright 2017, Akamai Technologies"
__license__ = "Apache2"
@contextmanager
def chdir(dir):
    """Temporarily change the working directory to *dir*.

    The previous working directory is restored on exit, even if the
    with-body raises.
    """
    orig_dir = os.getcwd()
    os.chdir(dir)
    try:
        yield
    finally:
        # BUGFIX: restore the original directory even when the body raises;
        # previously an exception left the process stranded in *dir*.
        os.chdir(orig_dir)
def dump_system_info():
    """Return a human-readable, multi-line summary of js2esi version and platform info."""
    git_describe = 'release version'
    with chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))):
        try:
            c = ['git', 'describe', '--tags', '--long']
            git_describe = subprocess.check_output(c, stderr=subprocess.STDOUT)
            last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2)
            if last_tag.startswith('v'):
                # remove the 'v' prefix
                last_tag = last_tag[1:]
            if commit.startswith('g'):
                # remove the 'g' prefix added by recent git versions
                commit = commit[1:]
            # build the same version specifier as used for snapshots by rtool
            git_describe = "{version}dev{tag:04}-0x{commit}".format(
                version=last_tag,
                tag=int(tag_dist),
                commit=commit,
            )
        except Exception:
            # BUGFIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and could leave git_describe as raw
            # bytes when parsing failed mid-way. Fall back to the default label.
            git_describe = 'release version'
    bin_indicator = ""  # PyInstaller builds indicator, if using precompiled binary
    if getattr(sys, 'frozen', False):
        bin_indicator = "Precompiled Binary"
    data = [
        "js2esi version: {} ({}) {}".format(version.VERSION, git_describe, bin_indicator),
        "Python version: {}".format(platform.python_version()),
        "Platform: {}".format(platform.platform()),
    ]
    # BUGFIX: platform.linux_distribution() was removed in Python 3.8; guard the
    # call so this function does not raise AttributeError on modern interpreters.
    if hasattr(platform, 'linux_distribution'):
        d = platform.linux_distribution()
        t = "Linux distro: %s %s %s" % d
        if d[0]:  # pragma: no cover
            data.append(t)
    d = platform.mac_ver()
    t = "Mac version: %s %s %s" % d
    if d[0]:  # pragma: no cover
        data.append(t)
    d = platform.win32_ver()
    t = "Windows version: %s %s %s %s" % d
    if d[0]:  # pragma: no cover
        data.append(t)
    return "\n".join(data)
| 29.742857
| 90
| 0.585975
|
4a08b1bc32052d110af98e7a7b2bf5f6d970bc0c
| 77,607
|
py
|
Python
|
editor/models.py
|
arnabsenapati/editor
|
0b0d1de2d9bd2ccb85c8e4c9f2a52db637107774
|
[
"Apache-2.0"
] | 1
|
2022-03-25T02:37:49.000Z
|
2022-03-25T02:37:49.000Z
|
editor/models.py
|
arnabsenapati/editor
|
0b0d1de2d9bd2ccb85c8e4c9f2a52db637107774
|
[
"Apache-2.0"
] | null | null | null |
editor/models.py
|
arnabsenapati/editor
|
0b0d1de2d9bd2ccb85c8e4c9f2a52db637107774
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import os
import re
from copy import deepcopy
import shutil
from zipfile import ZipFile
import json
from datetime import datetime
from itertools import groupby
import codecs
from pathlib import Path
import urllib.parse
try:
# For Python > 2.7
from collections import OrderedDict
except ImportError:
# For Python < 2.6 (after installing ordereddict)
from ordereddict import OrderedDict
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles import finders
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.urls import reverse
from django.db import models, transaction
from django.db.models import signals, Max, Min, Exists, OuterRef
from django.db.models.functions import Lower
from django.dispatch import receiver
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.db.models import Q
from django.forms import model_to_dict
from django.utils import timezone
from django.utils.deconstruct import deconstructible
from django.template.loader import get_template
from django.core.mail import send_mail
from .slugify import slugify
import reversion
import reversion.models
from notifications.signals import notify
from notifications.models import Notification
import taggit.models
from taggit.managers import TaggableManager
import numbasobject
from .notify_watching import notify_watching
from .jsonfield import JSONField
# Choices for an object's public visibility / public editing level.
PUBLIC_ACCESS_CHOICES = (('hidden', 'Hidden'), ('view', 'Public can view'), ('edit', 'Public can edit'))
# Choices for per-user individual access grants.
USER_ACCESS_CHOICES = (('view', 'Can view'), ('edit', 'Can edit'))
@deconstructible
class ControlledObject(object):
    """
    An object with controls on who can view, edit, copy and delete it.
    Classes inheriting this must implement:
    * owner : User
    * is_published : () -> bool
    * superuser_sees_everything : bool
    * access : GenericRelation to IndividualAccess
    """
    superuser_sees_everything = True
    @property
    def owner(self):
        raise NotImplementedError
    def is_published(self):
        raise NotImplementedError
    def has_access(self, user, accept_levels):
        # Anonymous users can never hold an individual access grant.
        if user.is_anonymous:
            return False
        return self.access.filter(user=user, access__in=accept_levels).exists()
    def can_be_viewed_by(self, user):
        # Global override: every object is visible when EVERYTHING_VISIBLE is set.
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return True
        return (self.superuser_sees_everything and user.is_superuser) or (self.owner == user) or self.is_published() or (self.has_access(user, ('view', 'edit')))
    def can_be_edited_by(self, user):
        return (user.is_superuser) or (self.owner == user) or self.has_access(user, ('edit',))
    def can_be_copied_by(self, user):
        # Anyone who can view may copy.
        return self.can_be_viewed_by(user)
    def can_be_deleted_by(self, user):
        return user == self.owner
    def __eq__(self, other):
        # NOTE(review): always returns True — presumably so @deconstructible
        # migration comparisons treat all instances as equal; confirm before changing.
        return True
    @classmethod
    def filter_can_be_edited_by(cls, user):
        # Q-filter equivalent of can_be_edited_by, for queryset filtering.
        if user.is_superuser and cls.superuser_sees_everything:
            return Q()
        elif user.is_anonymous:
            # Matches nothing.
            return Q(pk=None)
        else:
            # Direct edit grant, authorship, or edit access via the containing project.
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access='edit').values('object_id'))
                    | Q(author=user)
                    | Q(project__in=user.individual_accesses.for_model(Project).filter(access='edit').values('object_id'))
                    | Q(project__owner=user)
                   )
    @classmethod
    def filter_can_be_viewed_by(cls, user):
        # Q-filter equivalent of can_be_viewed_by, for queryset filtering.
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        view_perms = ('edit', 'view')
        if user.is_superuser and cls.superuser_sees_everything:
            return Q()
        elif user.is_anonymous:
            return Q(published=True)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | Q(published=True)
                    | Q(author=user)
                    | Q(project__in=user.individual_accesses.for_model(Project).values('object_id'))
                    | Q(project__owner=user)
                   )
class TimelineMixin(object):
    """
    A model which produces a timeline item when it is created.
    Models inheriting from this should implement either
    * self.object, or
    * self.timeline_object() and self.can_be_deleted_by(user)
    as well as a GenericRelation `timelineitems` to TimelineItem
    """
    def can_be_deleted_by(self, user):
        # Author of the underlying object (if any) may delete; otherwise only the
        # user who created this timeline entry.
        try:
            if self.object.author == user:
                return True
        except AttributeError:
            pass
        return user == self.user
    def can_be_viewed_by(self, user):
        raise NotImplementedError
    def timeline_object(self):
        # Prefer a direct `object` attribute; fall back to resolving the stored
        # content-type / object-id pair.
        try:
            return self.object
        except AttributeError:
            ct = ContentType.objects.get(pk=self.object_content_type.pk)
            return ct.get_object_for_this_type(pk=self.object_id)
    @property
    def timelineitem(self):
        # The single TimelineItem generated for this object.
        return self.timelineitems.get()
# (code, label) pairs for the locale picker, derived from the global Numbas locale list.
LOCALE_CHOICES = [(y, x) for x, y in settings.GLOBAL_SETTINGS['NUMBAS_LOCALES']]
def reassign_content(from_user,to_user):
    """Transfer everything `from_user` owns or has access to over to `to_user`,
    atomically. Used when merging or retiring accounts."""
    with transaction.atomic():
        for p in from_user.own_projects.all():
            p.owner = to_user
            p.save()
        for pi in from_user.project_invitations.all():
            # Only move an invitation if the target user doesn't already have
            # at least this level of access to the project.
            if not pi.project.has_access(to_user,(pi.access,)):
                pi.user = to_user
                pi.save()
        for e in from_user.own_extensions.all():
            e.author = to_user
            e.save()
        for t in from_user.own_themes.all():
            t.author = to_user
            t.save()
        for a in from_user.individual_accesses.all():
            # Merge each access grant into the target user's existing grants.
            a.combine_access(to_user)
        for cpt in from_user.own_custom_part_types.all():
            cpt.author = to_user
            cpt.save()
        for r in from_user.resources.all():
            r.owner = to_user
            r.save()
        for ei in from_user.own_items.all():
            ei.author = to_user
            ei.save()
        for sb in from_user.site_broadcasts.all():
            sb.author = to_user
            sb.save()
class Project(models.Model, ControlledObject):
    """A workspace owned by a single user, containing editor items, folders
    and custom part types.

    Other users get access through generic ``IndividualAccess`` rows;
    ``public_view`` makes the project visible to everybody.
    """
    name = models.CharField(max_length=200)
    owner = models.ForeignKey(User, related_name='own_projects', on_delete=models.CASCADE)

    # Generic relations: access grants and timeline entries refer to this
    # project through (content type, object id) pairs.
    access = GenericRelation('IndividualAccess', related_query_name='project', content_type_field='object_content_type', object_id_field='object_id')
    timeline = GenericRelation('TimelineItem', related_query_name='projects', content_type_field='timeline_content_type', object_id_field='timeline_id')
    timeline_noun = 'project'

    public_view = models.BooleanField(default=False)
    # Non-members who opted in to notifications, and members who opted out.
    watching_non_members = models.ManyToManyField(User, related_name='watched_projects')
    unwatching_members = models.ManyToManyField(User, related_name='unwatched_projects')

    icon = 'briefcase'

    description = models.TextField(blank=True)
    default_locale = models.CharField(max_length=10, editable=True, default='en-GB')
    default_licence = models.ForeignKey('Licence', null=True, blank=True, on_delete=models.SET_NULL)
    custom_part_types = models.ManyToManyField('CustomPartType', related_name='projects')

    class Meta:
        ordering = ['name']

    def is_published(self):
        # A project counts as "published" when anyone may view it.
        return self.public_view

    def get_absolute_url(self):
        return reverse('project_index', args=(self.pk,))

    def has_access(self, user, levels):
        # The owner implicitly has every access level.
        if user == self.owner:
            return True
        return super().has_access(user, levels)

    def members(self):
        """Every user with access to this project, owner first."""
        return [self.owner] + self.non_owner_members()

    def non_owner_members(self):
        """Users other than the owner who have an individual access grant."""
        return list(User.objects.filter(individual_accesses__in=self.access.all()).exclude(pk=self.owner.pk))

    def all_timeline(self):
        """Timeline items attached to this project or to its contents
        (editor items and item queue entries), newest first."""
        items = self.timeline.all() | TimelineItem.objects.filter(
            Q(editoritems__project=self) |
            Q(item_queue_entry__queue__project=self) |
            Q(item_queue_entries__queue__project=self)
        )
        # Fix: ``order_by`` returns a new queryset; the original code
        # discarded its result, so the items were never actually sorted.
        items = items.order_by('-date')
        return items

    @property
    def watching_users(self):
        """Users to notify about project activity: the owner, members and
        watching non-members, minus members who have opted out."""
        q = (User.objects.filter(pk=self.owner.pk) | User.objects.filter(individual_accesses__in=self.access.all()) | self.watching_non_members.all()).distinct()
        return q.exclude(pk__in=self.unwatching_members.all())

    def __str__(self):
        return self.name

    def num_published_questions(self):
        return self.items.questions().filter(published=True).count()

    def num_published_exams(self):
        return self.items.exams().filter(published=True).count()

    def folder_hierarchy(self):
        """Return the project's folders as a list of trees; each node is a
        dict ``{'folder': Folder, 'subfolders': [nodes]}``."""
        folders = self.folders.all()
        tree = []
        folder_dict = {f.pk: {'folder': f, 'subfolders': []} for f in folders}
        for f in folders:
            if f.parent and f.parent.pk in folder_dict:
                folder_dict[f.parent.pk]['subfolders'].append(folder_dict[f.pk])
            else:
                tree.append(folder_dict[f.pk])
        return tree

    def get_folder_breadcrumbs(self, path):
        """Resolve a slash-separated, URL-quoted folder path to the list of
        Folder objects from the root down.

        As a side effect, duplicate folders sharing a name and parent are
        merged into one inside a transaction.
        """
        breadcrumbs = []
        if len(path):
            parent = None
            for name in path.split('/'):
                try:
                    folder = self.folders.get(name=urllib.parse.unquote(name), parent=parent)
                except Folder.MultipleObjectsReturned:
                    folders = self.folders.filter(name=urllib.parse.unquote(name), parent=parent)
                    folder = folders[0]
                    with transaction.atomic():
                        for ofolder in folders[1:]:
                            ofolder.merge_into(folder)
                breadcrumbs.append(folder)
                parent = folder
        return breadcrumbs

    def get_folder(self, path):
        """Return the Folder at ``path``, or None for the project root."""
        breadcrumbs = self.get_folder_breadcrumbs(path)
        if len(breadcrumbs):
            return breadcrumbs[-1]
        else:
            return None

    @classmethod
    def filter_can_be_edited_by(cls, user):
        """Q object selecting projects the given user may edit."""
        if user.is_superuser and cls.superuser_sees_everything:
            return Q()
        elif user.is_anonymous:
            return Q(pk=None)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access='edit').values('object_id'))
                    | Q(owner=user)
                    )

    @classmethod
    def filter_can_be_viewed_by(cls, user):
        """Q object selecting projects the given user may view."""
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        view_perms = ('edit', 'view')
        if user.is_superuser and cls.superuser_sees_everything:
            return Q()
        elif user.is_anonymous:
            return Q(public_view=True)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | Q(public_view=True)
                    | Q(owner=user)
                    )
class IndividualAccessManager(models.Manager):
    """Manager adding a shortcut to filter access grants by target model."""

    def for_model(self, model):
        """Return accesses whose target object is an instance of ``model``."""
        content_type = ContentType.objects.get_for_model(model)
        return self.filter(object_content_type=content_type)
class IndividualAccess(models.Model, TimelineMixin):
    """Grants a single user an access level ('view' or 'edit') on one object
    (project, editor item, extension, theme or custom part type) through a
    generic foreign key."""
    objects = IndividualAccessManager()

    object_content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    object = GenericForeignKey('object_content_type', 'object_id')
    user = models.ForeignKey(User, related_name='individual_accesses', on_delete=models.CASCADE)
    access = models.CharField(default='view', editable=True, choices=USER_ACCESS_CHOICES, max_length=6)

    timelineitems = GenericRelation('TimelineItem', related_query_name='individual_accesses', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/access.html'

    def __str__(self):
        return f'{self.access} access granted to {self.user} on {self.object}'

    def combine_access(self, to_user):
        """Transfer this grant to ``to_user``; if they already have a grant
        on the same object, keep whichever access level is higher."""
        order = ['view','edit']
        try:
            a2 = IndividualAccess.objects.get(user=to_user, object_content_type=self.object_content_type, object_id=self.object_id)
            # Highest level wins: sort the two levels by position in ``order``.
            level = sorted([self.access,a2.access],key=order.index)[-1]
            if level != a2.access:
                a2.access = level
                a2.save()
        except IndividualAccess.DoesNotExist:
            # No existing grant on the target: simply reassign this row.
            self.user = to_user
            self.save()

    def can_be_viewed_by(self, user):
        # Delegates to the target object's visibility rules.
        return self.object.can_be_viewed_by(user)

    def timeline_object(self):
        return self.object
class ProjectInvitation(models.Model):
    """An emailed invitation for someone (possibly without an account yet) to
    join a project with a given access level; applied when they sign up."""
    email = models.EmailField()
    invited_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='project_invitations')
    access = models.CharField(default='view', editable=True, choices=USER_ACCESS_CHOICES, max_length=6)
    project = models.ForeignKey(Project, related_name='invitations', on_delete=models.CASCADE)
    applied = models.BooleanField(default=False)  # set once the access grant has been created

    def __str__(self):
        return "Invitation for {} to join {}".format(self.email, self.project)
@receiver(signals.post_save, sender=ProjectInvitation)
def send_project_invitation(instance, created, **kwargs):
    """When a new ProjectInvitation is created, email the invitee."""
    if created:
        template = get_template('project/invitation_email.txt')
        content = template.render({'invitation':instance, 'SITE_TITLE':settings.SITE_TITLE})
        subject = 'Invitation to join project "{}", on {}'.format(instance.project.name, settings.SITE_TITLE)
        send_mail(subject, content, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=(instance.email,))
@receiver(signals.post_save, sender=User)
def apply_project_invitations(instance, created, **kwargs):
    """When a new user account is created, apply any pending project
    invitations sent to their email address (case-insensitive match)."""
    if created:
        invitations = ProjectInvitation.objects.filter(email__iexact=instance.email)
        for invitation in invitations:
            project = invitation.project
            # Only act if the user doesn't already have this level of access.
            if not project.has_access(instance,(invitation.access,)):
                try:
                    # Upgrade an existing grant in place if there is one.
                    access = IndividualAccess.objects.get(project=project,user=instance)
                    access.access = invitation.access
                    access.save()
                except IndividualAccess.DoesNotExist:
                    IndividualAccess.objects.create(object=project, user=instance, access=invitation.access)
            invitation.applied = True
            invitation.save()
class EditorTag(taggit.models.TagBase):
    """A tag applied to editor items; 'official' tags are curated by admins."""
    official = models.BooleanField(default=False)

    class Meta:
        verbose_name = 'tag'
        ordering = ['name']

    def used_count(self):
        """Number of objects carrying this tag."""
        return self.tagged_items.count()
def validate_content(content):
    """Validate that a .exam source string parses and defines a "name".

    Raises ValidationError if the content cannot be parsed or has no
    ``name`` property.
    """
    try:
        obj = numbasobject.NumbasObject(content)
    except Exception as err:
        # Wrap parse errors in ValidationError so Django model/form
        # validation reports them uniformly; chain the original cause.
        # (The original code also caught and re-wrapped its own
        # ValidationError from the missing-name check below.)
        raise ValidationError(err) from err
    if 'name' not in obj.data:
        raise ValidationError('No "name" property in content.')
class EditablePackageMixin(object):
    """
    A package whose contents can be edited by users with the right access privileges.
    Extensions and themes are editable packages.
    """
    package_noun = None  # human-readable noun for messages, e.g. 'extension'

    def filenames(self):
        """Yield the relative paths of all files in the extracted package,
        skipping dot-prefixed (hidden) directories and files."""
        top = Path(self.extracted_path)
        for d,dirs,files in os.walk(str(top)):
            rd = Path(d).relative_to(top)
            if str(rd)=='.' or not re.match(r'^\.',str(rd)):
                for f in sorted(files,key=str):
                    if not re.match(r'^\.',f):
                        yield str(rd / f)

    def directory_contents(self, directory):
        """Yield the entries of ``directory`` (relative to the extracted
        root), or nothing if the directory doesn't exist."""
        top = Path(self.extracted_path) / directory
        if not top.exists():
            return
        for p in top.iterdir():
            yield p.relative_to(self.extracted_path)

    def write_file(self,filename,content):
        """Write ``content`` to ``filename`` inside the package, creating
        parent directories; refuses paths that escape the package root."""
        root = os.path.abspath(self.extracted_path)
        path = os.path.abspath(os.path.join(root,filename))
        # Path-traversal guard: the resolved path must stay under the root.
        if not path.startswith(root+os.sep):
            raise Exception("You may not write a file outside the {package_noun}'s directory".format(package_noun=self.package_noun))
        dpath = Path(path).parent
        dpath.mkdir(parents=True,exist_ok=True)
        with open(path,'w',encoding='utf-8') as f:
            f.write(content)

    @property
    def relative_extracted_path(self):
        # Subclasses must say where (under MEDIA_ROOT) the package lives.
        raise NotImplementedError

    @property
    def extracted_path(self):
        """Absolute path of the extracted package directory."""
        return os.path.join(os.getcwd(), settings.MEDIA_ROOT, self.relative_extracted_path)

    def url_for(self, filename):
        """Public URL of a file inside the extracted package."""
        return settings.MEDIA_URL+self.relative_extracted_path+'/'+filename

    def ensure_extracted_path_exists(self):
        # Recreate the extraction directory from scratch (clears old files).
        if os.path.exists(self.extracted_path):
            shutil.rmtree(self.extracted_path)
        os.makedirs(self.extracted_path)

    @property
    def readme_filename(self):
        """The package's README file name; falls back to 'README.md' even
        when no README exists."""
        names = ['README.md','README.html','README']
        for name in names:
            if self.has_file(name):
                return name
        return names[0]

    def has_file(self, filename):
        """True if the named file exists in the extracted package."""
        return os.path.exists(os.path.join(self.extracted_path,filename))
class Extension(models.Model, ControlledObject, EditablePackageMixin):
    """A Numbas extension: a package of JavaScript (and other files) that can
    be attached to questions.  Either editable (stored under MEDIA_ROOT from
    an uploaded zip) or built-in (stored in the Numbas source tree)."""
    name = models.CharField(max_length=200, help_text='A human-readable name for the extension')
    location = models.CharField(default='', max_length=200, help_text='A unique identifier for this extension', verbose_name='Short name', blank=True, unique=True)
    url = models.CharField(max_length=300, blank=True, verbose_name='Documentation URL', help_text='Address of a page about the extension. Leave blank to use the README file.')
    public = models.BooleanField(default=False, help_text='Can this extension be seen by everyone?')
    slug = models.SlugField(max_length=200, editable=False, unique=False, default='an-extension')
    author = models.ForeignKey(User, related_name='own_extensions', blank=True, null=True, on_delete=models.CASCADE)
    last_modified = models.DateTimeField(auto_now=True)
    zipfile_folder = 'user-extensions'
    zipfile = models.FileField(upload_to=zipfile_folder+'/zips', blank=True, null=True, max_length=255, verbose_name='Extension package', help_text='A .zip package containing the extension\'s files')
    editable = models.BooleanField(default=True, help_text='Is this extension stored within the editor\'s media folder?')
    runs_headless = models.BooleanField(default=True, help_text='Can this extension run outside a browser?')

    access = GenericRelation('IndividualAccess', related_query_name='extension', content_type_field='object_content_type', object_id_field='object_id')

    # Unlike most controlled objects, superusers don't automatically see
    # every extension.
    superuser_sees_everything = False

    package_noun = 'extension'
    timeline_noun = 'extension'

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def has_access(self, user, levels):
        # The author implicitly has every access level.
        if user==self.author:
            return True
        return super().has_access(user,levels)

    @property
    def owner(self):
        return self.author

    def is_published(self):
        return self.public

    @classmethod
    def filter_can_be_viewed_by(cls, user):
        """Q object selecting extensions the given user may view."""
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        view_perms = ('edit', 'view')
        if cls.superuser_sees_everything and user.is_superuser:
            return Q()
        elif user.is_anonymous:
            return Q(public=True)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | Q(public=True)
                    | Q(author=user)
                    )

    def as_json(self):
        """Serialisable summary used by the editor front-end."""
        d = {
            'name': self.name,
            'url': reverse('extension_documentation',args=(self.pk,)),
            'pk': self.pk,
            'location': self.location,
            'author': self.author.pk if self.author is not None else None,
            'edit_url': reverse('extension_edit', args=(self.pk,)),
            'script_url': self.script_root,
        }
        path = self.script_path
        if path is not None:
            d['hasScript'] = True
            d['scriptURL'] = path
        d['scripts'] = list(self.scripts())
        return d

    def scripts(self):
        """Yield the names of top-level .js files in the extracted package."""
        for f in Path(self.extracted_path).iterdir():
            if f.suffix == '.js':
                yield f.name

    @property
    def main_filename(self):
        # An extension's entry point is <location>.js.
        return self.location+'.js'

    @property
    def script_root(self):
        """URL prefix under which the extension's scripts are served."""
        if self.editable:
            return settings.MEDIA_URL+self.zipfile_folder+'/extracted/'+str(self.pk)+'/'+self.location+'/'
        else:
            path = 'js/numbas/extensions/%s/' % (self.location)
            return settings.STATIC_URL+path
        # NOTE(review): unreachable - both branches above return.
        return None

    @property
    def script_path(self):
        """Full URL of the extension's main script."""
        return self.script_root + self.main_filename

    @property
    def relative_extracted_path(self):
        # The extraction directory is keyed on the primary key, so the
        # object must have been saved first.
        if self.pk is None:
            raise Exception("This object doesn't have an ID yet.")
        return os.path.join(self.zipfile_folder, 'extracted', str(self.pk), self.location)

    @property
    def extracted_path(self):
        # Built-in (non-editable) extensions live in the Numbas source tree.
        if self.editable:
            return super().extracted_path
        else:
            return os.path.join(settings.GLOBAL_SETTINGS['NUMBAS_PATH'], 'extensions', self.location)

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the name on every save.
        self.slug = slugify(self.name)
        super(Extension, self).save(*args, **kwargs)

    def extract_zip(self):
        """Unpack the uploaded package: a .zip is extracted wholesale; a
        single .js file is written as the extension's main script."""
        if not self.zipfile:
            return

        self.ensure_extracted_path_exists()

        _, extension = os.path.splitext(self.zipfile.name)
        if extension.lower() == '.zip':
            z = ZipFile(self.zipfile.file, 'r')
            z.extractall(self.extracted_path)
        elif extension.lower() == '.js':
            file = open(os.path.join(self.extracted_path, self.location+'.js'), 'wb')
            file.write(self.zipfile.file.read())
            file.close()

    def get_absolute_url(self):
        return reverse('extension_documentation',args=(self.pk,))

    def icon(self):
        return 'wrench'
@receiver(signals.pre_save)
def extract_editable_package_zip_pre(sender, instance, **kwargs):
    """Before saving an editable package (extension/theme), record whether
    its zipfile changed, so the post-save handler knows to re-extract it.

    The flag is stashed on ``instance.__changed_zipfile``.
    """
    if not isinstance(instance, EditablePackageMixin):
        return
    changed = False
    if instance.zipfile:
        try:
            previous = instance.__class__.objects.get(pk=instance.pk)
        except instance.__class__.DoesNotExist:
            # Brand-new object: the zipfile is necessarily new too.
            changed = True
        else:
            changed = previous.zipfile != instance.zipfile
    instance.__changed_zipfile = changed
@receiver(signals.post_save)
def extract_editable_package_zip_post(sender, instance, **kwargs):
    """After saving an editable package, extract its zipfile if the pre-save
    handler flagged it as changed."""
    if not isinstance(instance, EditablePackageMixin):
        return
    if getattr(instance, '__changed_zipfile', False):
        instance.extract_zip()
@receiver(signals.pre_delete, sender=Extension)
def delete_extracted_extension(sender, instance, **kwargs):
    """When an editable extension is deleted, remove its extracted files
    (the whole per-pk directory) from the media folder."""
    if not instance.editable:
        return
    package_dir = Path(instance.extracted_path).parent
    if package_dir.exists():
        shutil.rmtree(str(package_dir))
class Theme(models.Model, ControlledObject, EditablePackageMixin):
    """A user-uploaded Numbas theme package (a .zip extracted under
    MEDIA_ROOT) that exams can use instead of the built-in themes."""
    name = models.CharField(max_length=200)
    public = models.BooleanField(default=False, help_text='Can this theme be seen by everyone?')
    slug = models.SlugField(max_length=200, editable=False, unique=False)
    author = models.ForeignKey(User, related_name='own_themes', on_delete=models.CASCADE)
    last_modified = models.DateTimeField(auto_now=True)
    zipfile_folder = 'user-themes'
    zipfile = models.FileField(upload_to=zipfile_folder+'/zips', max_length=255, verbose_name='Theme package', help_text='A .zip package containing the theme\'s files')

    access = GenericRelation('IndividualAccess', related_query_name='theme', content_type_field='object_content_type', object_id_field='object_id')

    package_noun = 'theme'
    timeline_noun = 'theme'
    editable = True  # user themes are always stored in the media folder

    def __str__(self):
        return self.name

    def has_access(self, user, levels):
        # The author implicitly has every access level.
        if user==self.author:
            return True
        return super().has_access(user,levels)

    @property
    def owner(self):
        return self.author

    def is_published(self):
        return self.public

    @classmethod
    def filter_can_be_viewed_by(cls, user):
        """Q object selecting themes the given user may view."""
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        view_perms = ('edit', 'view')
        if cls.superuser_sees_everything and user.is_superuser:
            return Q()
        elif user.is_anonymous:
            return Q(public=True)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | Q(public=True)
                    | Q(author=user)
                    )

    @property
    def relative_extracted_path(self):
        return os.path.join(self.zipfile_folder, 'extracted', str(self.pk))

    @property
    def main_filename(self):
        # Themes may declare a parent theme via inherit.txt; otherwise the
        # README is treated as the main file.
        if self.has_file('inherit.txt'):
            return 'inherit.txt'
        else:
            return self.readme_filename

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the name on every save.
        self.slug = slugify(self.name)
        super(Theme, self).save(*args, **kwargs)

    def extract_zip(self):
        """Unpack the uploaded .zip into the extraction directory."""
        if not self.zipfile:
            return

        self.ensure_extracted_path_exists()

        z = ZipFile(self.zipfile.file, 'r')
        z.extractall(self.extracted_path)

    def get_absolute_url(self):
        return reverse('theme_edit',args=(self.pk,))

    def icon(self):
        return 'sunglasses'
@receiver(signals.pre_delete, sender=Theme)
def reset_theme_on_delete(sender, instance, **kwargs):
    """When a custom theme is deleted, switch every exam using it back to
    the first built-in theme so they keep working."""
    default_theme = settings.GLOBAL_SETTINGS['NUMBAS_THEMES'][0][1]
    for exam in instance.used_in_newexams.all():
        exam.custom_theme = None
        exam.theme = default_theme
        exam.save()
# Who may use a custom part type when authoring questions.
CUSTOM_PART_TYPE_PUBLIC_CHOICES = [
    ('restricted', 'Only to permitted users'),
    ('always', 'Always available'),
    ('select', 'When selected'),
]

# Widgets available for entering an answer to a custom part type.
CUSTOM_PART_TYPE_INPUT_WIDGETS = [
    ('string', 'String'),
    ('number', 'Number'),
    ('jme', 'Mathematical expression'),
    ('matrix', 'Matrix'),
    ('radios', 'Radio buttons'),
    ('checkboxes', 'Choose several from a list'),
    ('dropdown', 'Drop-down box'),
]
class CustomPartType(models.Model, ControlledObject):
    """A user-defined question part type: an input widget plus a marking
    algorithm and settings, identified by a unique ``short_name``."""
    author = models.ForeignKey(User, related_name='own_custom_part_types', on_delete=models.CASCADE)
    name = models.CharField(max_length=200, verbose_name='Name')
    short_name = models.CharField(max_length=200, unique=True, verbose_name='Unique identifier for this part type')
    description = models.TextField(default='', blank=True, verbose_name='What\'s this part type for?')
    input_widget = models.CharField(max_length=200, verbose_name='Answer input method')
    input_options = JSONField(blank=True, verbose_name='Options for the answer input method')
    can_be_gap = models.BooleanField(default=True, verbose_name='Can this part be a gap?')
    can_be_step = models.BooleanField(default=True, verbose_name='Can this part be a step?')
    marking_script = models.TextField(default='', blank=True, verbose_name='Marking algorithm')
    # NOTE(review): default is the string '[]', i.e. JSON text for an empty
    # list - confirm the JSONField treats it as intended.
    marking_notes = JSONField(blank=True,default='[]', verbose_name='Marking algorithm notes')
    settings = JSONField(blank=True)
    help_url = models.URLField(blank=True, verbose_name='URL of documentation')
    public_availability = models.CharField(max_length=10, choices=CUSTOM_PART_TYPE_PUBLIC_CHOICES, verbose_name='Public availability', default='restricted')
    ready_to_use = models.BooleanField(default=False, verbose_name='Ready to use?')
    copy_of = models.ForeignKey('self', null=True, related_name='copies', on_delete=models.SET_NULL)
    extensions = models.ManyToManyField(Extension, blank=True, related_name='custom_part_types')

    access = GenericRelation('IndividualAccess', related_query_name='custom_part_type', content_type_field='object_content_type', object_id_field='object_id')

    timeline_noun = 'part type'

    icon = 'ok'

    def copy(self, author, name):
        """Create and save a private copy of this part type for ``author``,
        with a fresh unique short name and the same extensions."""
        new_type = CustomPartType.objects.get(pk=self.pk)
        new_type.pk = None
        new_type.id = None
        new_type.author = author
        new_type.public_availability = 'restricted'
        new_type.name = name
        new_type.set_short_name(slugify(name))
        new_type.copy_of = self
        new_type.save()
        new_type.extensions.set(self.extensions.all())
        return new_type

    def __str__(self):
        return self.name

    @property
    def filename(self):
        return slugify(self.name)

    def __repr__(self):
        return '<CustomPartType: {}>'.format(self.short_name)

    def get_absolute_url(self):
        return reverse('custom_part_type_edit', args=(self.pk,))

    @property
    def owner(self):
        return self.author

    def is_published(self):
        return self.public_availability in ('always', 'select')

    def set_short_name(self, slug):
        """Set a unique short name based on ``slug``, avoiding clashes with
        built-in part type names and with other custom part types."""
        built_in_part_types = ['jme','numberentry','patternmatch','matrix','gapfill','information','extension','1_n_2','m_n_2','m_n_x']
        if slug in built_in_part_types:
            slug = 'custom-'+slug
        short_name = slug
        i = 0
        # Append an increasing suffix until the name is unique.
        while CustomPartType.objects.exclude(pk=self.pk).filter(short_name=short_name).exists():
            i += 1
            short_name = '{}-{}'.format(slug,i)
        self.short_name = short_name

    def has_access(self, user, levels):
        # Published part types are viewable by everyone; the owner and
        # superusers always have access.
        if 'view' in levels:
            if self.published:
                return True
        if user==self.owner or user.is_superuser:
            return True
        return super().has_access(user,levels)

    @classmethod
    def filter_can_be_viewed_by(cls, user):
        """Q object selecting part types the given user may view."""
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        q_public = Q(public_availability__in=('always','select'))
        view_perms = ('edit', 'view')
        if cls.superuser_sees_everything and user.is_superuser:
            return Q()
        elif user.is_anonymous:
            return q_public
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | q_public
                    | Q(author=user)
                    )

    @property
    def published(self):
        return self.public_availability != 'restricted'

    def as_json(self):
        """Full serialisation used by the editor front-end."""
        return {
            'source': {
                'pk': self.pk,
                'author': {
                    'name': self.author.get_full_name(),
                    'pk': self.author.pk,
                },
                'edit_page': reverse('custom_part_type_edit', args=(self.pk,)),
            },
            'name': self.name,
            'short_name': self.short_name,
            'description': self.description,
            'help_url': self.help_url,
            'input_widget': self.input_widget,
            'input_options': self.input_options,
            'can_be_gap': self.can_be_gap,
            'can_be_step': self.can_be_step,
            'marking_script': self.marking_script,
            'marking_notes': self.marking_notes,
            'settings': self.settings,
            'public_availability': self.public_availability,
            'published': self.published,
            'extensions': [e.location for e in self.extensions.all()],
        }

    def as_source(self):
        """Like as_json, but with only the author's name in 'source' - used
        when exporting the part type out of this site."""
        obj = self.as_json()
        obj['source'] = {
            'author': {
                'name': self.author.get_full_name(),
            }
        }
        return obj
class Resource(models.Model):
    """A file (usually an image) uploaded by a user for use in questions."""
    owner = models.ForeignKey(User, related_name='resources', on_delete=models.CASCADE)
    date_created = models.DateTimeField(auto_now_add=True)
    file = models.FileField(upload_to='question-resources/', max_length=255)
    alt_text = models.TextField(blank=True)  # accessibility text for images

    def __str__(self):
        return self.file.name

    @property
    def resource_url(self):
        # Relative URL used inside compiled exam packages.
        return 'resources/%s' % self.file.name

    @property
    def filetype(self):
        """The file's extension, including the leading dot."""
        name,ext = os.path.splitext(self.file.name)
        return ext

    def get_created_time(self):
        return default_storage.get_created_time(self.file.name)

    def is_image(self):
        return self.filetype.lower() in ('.png','.jpg','.svg','.gif')

    def delete(self, *args, **kwargs):
        # Remove the underlying file from storage along with the record.
        self.file.delete(save=False)
        super(Resource, self).delete(*args, **kwargs)

    def as_json(self):
        return {
            'url': self.resource_url,
            'name': self.file.name,
            'pk': self.pk,
            'alt_text': self.alt_text,
        }
class Licence(models.Model):
    """A content licence (e.g. Creative Commons variant) that can be applied
    to editor items, recording what reuse it permits."""
    name = models.CharField(max_length=80, unique=True)
    short_name = models.CharField(max_length=20, unique=True)
    can_reuse = models.BooleanField(default=True)
    can_modify = models.BooleanField(default=True)
    can_sell = models.BooleanField(default=True)
    url = models.URLField(blank=True)
    full_text = models.TextField(blank=True)

    def __str__(self):
        return self.name

    def as_json(self):
        return {
            'name': self.name,
            'short_name': self.short_name,
            'can_reuse': self.can_reuse,
            'can_modify': self.can_modify,
            'can_sell': self.can_sell,
            'url': self.url,
            'pk': self.pk,
        }
# Feedback "stamp" statuses that reviewers can apply to an editor item.
STAMP_STATUS_CHOICES = (
    ('ok', 'Ready to use'),
    ('dontuse', 'Should not be used'),
    ('problem', 'Has some problems'),
    ('broken', 'Doesn\'t work'),
    ('pleasetest', 'Needs to be tested'),
)
class AbilityFramework(models.Model):
    """A named scale of ability levels (see AbilityLevel) against which
    questions can be rated."""
    name = models.CharField(max_length=200, blank=False, unique=True)
    description = models.TextField(blank=False)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
# Decimal places used to store ability-level interval endpoints.
ABILITY_PRECISION = 10

class AbilityLevel(models.Model):
    """One band on an ability framework, covering [start, end] on the
    framework's 0-1 scale."""
    name = models.CharField(max_length=200, blank=False, unique=True)
    description = models.TextField(blank=False)
    start = models.DecimalField(max_digits=ABILITY_PRECISION+1, decimal_places=ABILITY_PRECISION)
    end = models.DecimalField(max_digits=ABILITY_PRECISION+1, decimal_places=ABILITY_PRECISION)
    framework = models.ForeignKey(AbilityFramework, related_name='levels', on_delete=models.CASCADE)

    class Meta:
        ordering = ('framework', 'start',)

    def __str__(self):
        return self.name
class Taxonomy(models.Model):
    """A hierarchy of subject classification nodes used to tag questions."""
    name = models.CharField(max_length=200, blank=False, unique=True)
    description = models.TextField(blank=False)
    json = JSONField(blank=True)  # the JSON encoding of the taxonomy's nodes takes a while, and a lot of database queries, to make, so it's stored here and updated each time a node changes

    class Meta:
        verbose_name_plural = 'taxonomies'

    def __str__(self):
        return self.name

    def forest(self):
        """
        The nodes in the taxonomy, returned as a list of trees associating each node to its children.
        """
        # Sort siblings by code length, then code, so e.g. '2' < '10'.
        key = lambda n:(len(n.code),n.code)
        def make_tree(node):
            return [(n,make_tree(n)) for n in sorted(node.children.all(), key=key)]
        return [(n,make_tree(n)) for n in sorted(self.nodes.filter(parent=None),key=key)]

    def create_json(self):
        """Rebuild the cached ``json`` attribute from the node forest.
        Does not save; the caller is responsible for persisting."""
        def tree_json(leaves):
            return [{
                'pk': node.pk,
                'name': node.name,
                'code': node.code,
                'children': tree_json(kids)
            } for node,kids in leaves]
        self.json = tree_json(self.forest())
        return self.json
class TaxonomyNode(models.Model):
    """One node in a taxonomy tree; ``code`` is its outline number
    (e.g. '1.2.3') within the taxonomy."""
    name = models.CharField(max_length=200, blank=False, unique=False)
    parent = models.ForeignKey('TaxonomyNode', on_delete = models.CASCADE, related_name='children', blank=True, null=True)
    taxonomy = models.ForeignKey(Taxonomy, related_name='nodes', on_delete=models.CASCADE)
    code = models.CharField(max_length=200, blank=False)

    def __str__(self):
        return self.name
@receiver(signals.post_save, sender=TaxonomyNode)
def update_taxonomy_json(instance, **kwargs):
    """Rebuild and persist a taxonomy's cached JSON whenever one of its
    nodes is saved."""
    taxonomy = instance.taxonomy
    taxonomy.create_json()
    taxonomy.save()
class AbilityLevelField(models.FloatField):
    """A FloatField holding an ability-level bound; adds no extra behaviour,
    existing only to mark the field's meaning."""
    pass
# Through-models linking EditorTag to arbitrary objects via django-taggit.
class TaggedItem(taggit.models.GenericTaggedItemBase):
    tag = models.ForeignKey(EditorTag, related_name='tagged_editoritems', on_delete=models.CASCADE)

class TaggedQuestion(taggit.models.GenericTaggedItemBase):
    tag = models.ForeignKey(EditorTag, related_name='tagged_items', on_delete=models.CASCADE)

# Numbas .exam file format version written into synthesised content.
NUMBAS_FILE_VERSION = 'exam_results_page_options'
@deconstructible
class NumbasObject(object):
    """Mixin for models whose ``content`` field holds Numbas .exam source;
    keeps ``content``, ``name`` and ``metadata`` in sync with each other."""

    def get_parsed_content(self):
        """Parse ``self.content`` (or synthesise content from ``self.name``)
        and cache the result on ``self.parsed_content``.

        Side effects: updates ``self.name``, ``self.metadata`` and
        ``self.content`` from the parsed data.
        """
        if self.content:
            self.parsed_content = numbasobject.NumbasObject(self.content)
            self.name = self.parsed_content.data['name']
        elif self.name:
            self.parsed_content = numbasobject.NumbasObject(data={'name': self.name}, version=NUMBAS_FILE_VERSION)
        # NOTE(review): if both content and name are empty, parsed_content
        # is never set and the next line raises AttributeError - confirm
        # callers guarantee at least one is present.
        self.metadata = self.parsed_content.data.get('metadata', self.metadata)
        self.content = str(self.parsed_content)
        return self.parsed_content

    def set_name(self, name):
        """Rename the object, updating the embedded content, and save."""
        self.name = name
        if self.content:
            self.get_parsed_content()
            self.parsed_content.data['name'] = name
            self.content = str(self.parsed_content)
        self.save()

    def __eq__(self, other):
        # Objects are equal when their source content matches.
        # NOTE(review): __eq__ without __hash__ makes direct instances of
        # this mixin unhashable; model subclasses list models.Model first in
        # their bases, so Model's __eq__/__hash__ take precedence there.
        return self.content == other.content
class EditorItemManager(models.Manager):
    """Manager with shortcuts to narrow editor items by kind or publication.

    An item is a question or an exam depending on which one-to-one relation
    (``question``/``exam``) is populated.
    """
    def questions(self):
        return self.exclude(question=None)

    def exams(self):
        return self.exclude(exam=None)

    def published(self):
        return self.filter(published=True)
class Contributor(models.Model):
    """Credits a person for work on an editor item - either a site user
    (``user``) or an external person identified by ``name``/``profile_url``."""
    item = models.ForeignKey('EditorItem', on_delete=models.CASCADE, related_name='contributors')
    user = models.ForeignKey(User, related_name='item_contributions', on_delete=models.CASCADE, blank=True, null=True)
    name = models.CharField(max_length=200,blank=True)
    profile_url = models.URLField(blank=True)

    def __str__(self):
        name = self.user.get_full_name() if self.user else self.name
        return '{} on "{}"'.format(name,self.item)

    def as_json(self, request):
        """Serialise the contributor; for site users the profile URL is made
        absolute when a request is available."""
        if self.user:
            user = self.user
            profile_url = reverse('view_profile',args=(user.pk,))
            if request:
                profile_url = request.build_absolute_uri(profile_url)
            return {
                'name': user.get_full_name(),
                'profile_url': profile_url,
            }
        else:
            return {
                'name': self.name,
                'profile_url': self.profile_url,
            }

    class Meta:
        # A given user can be credited at most once per item.
        unique_together = (("item","user"))
class Folder(models.Model):
    """A folder inside a project, forming a tree via ``parent``; editor
    items can be filed into folders."""
    name = models.CharField(max_length=200)
    project = models.ForeignKey(Project, null=False, related_name='folders', on_delete=models.CASCADE)
    parent = models.ForeignKey('Folder', null=True, related_name='folders', on_delete=models.CASCADE)

    class Meta:
        unique_together = (('name', 'project', 'parent'),)
        ordering = ('name',)

    def clean(self):
        # NOTE(review): only direct self-parenting is rejected; longer
        # cycles are not checked here.
        if self.parent==self:
            raise ValidationError("A folder can't be its own parent.")

    def __str__(self):
        return '/'.join([self.project.name]+[f.name for f in self.parents()])

    def parents(self):
        """The chain of folders from the root down to (and including) self."""
        bits = []
        f = self
        while f:
            bits.insert(0,f)
            f = f.parent
        return bits

    def path(self):
        """URL-quoted slash-separated path of this folder within its project."""
        return '/'.join(urllib.parse.quote(f.name) for f in self.parents())

    def get_absolute_url(self):
        return reverse('project_browse',args=(self.project.pk, self.path()+'/'))

    def as_json(self):
        return {
            'pk': self.pk,
            'url': self.get_absolute_url(),
            'name': self.name,
        }

    def merge_into(self,folder):
        """Move this folder's items and subfolders into ``folder``, then
        delete this folder."""
        for item in self.items.all():
            item.folder = folder
            item.save()
        for subfolder in Folder.objects.filter(parent=self):
            subfolder.parent = folder
            subfolder.save()
        self.delete()

    def all_contents(self):
        """Return (folders, items) for this folder and every descendant."""
        queue = [self]
        folders = []
        items = []
        while queue:
            f = queue.pop()
            folders.append(f)
            items += f.items.all()
            queue += f.folders.all()
        return folders, items
@reversion.register
class EditorItem(models.Model, NumbasObject, ControlledObject):
"""
Base model for exams and questions - each exam or question has a reference to an instance of this
"""
objects = EditorItemManager()
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, editable=False, unique=False)
timeline = GenericRelation('TimelineItem', related_query_name='editoritems', content_type_field='timeline_content_type', object_id_field='timeline_id')
comments = GenericRelation('Comment', content_type_field='object_content_type', object_id_field='object_id')
author = models.ForeignKey(User, related_name='own_items', on_delete=models.CASCADE)
licence = models.ForeignKey(Licence, null=True, blank=True, on_delete=models.SET_NULL)
project = models.ForeignKey(Project, null=True, related_name='items', on_delete=models.CASCADE)
folder = models.ForeignKey(Folder, null=True, related_name='items', on_delete=models.SET_NULL)
access = GenericRelation('IndividualAccess', related_query_name='editoritem', content_type_field='object_content_type', object_id_field='object_id')
content = models.TextField(blank=True, validators=[validate_content])
metadata = JSONField(blank=True)
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
copy_of = models.ForeignKey('self', null=True, related_name='copies', on_delete=models.SET_NULL)
tags = TaggableManager(through=TaggedItem)
current_stamp = models.ForeignKey('NewStampOfApproval', blank=True, null=True, on_delete=models.SET_NULL)
share_uuid_view = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
share_uuid_edit = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
published = models.BooleanField(default=False)
published_date = models.DateTimeField(null=True)
ability_level_start = AbilityLevelField(null=True)
ability_level_end = AbilityLevelField(null=True)
ability_levels = models.ManyToManyField(AbilityLevel)
taxonomy_nodes = models.ManyToManyField(TaxonomyNode, related_name='editoritems')
unwatching_users = models.ManyToManyField(User, related_name='unwatched_items')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __unicode__(self):
return self.name
@property
def watching_users(self):
q = (User.objects.filter(pk=self.author.pk) | User.objects.filter(individual_accesses__in=self.access.all())).distinct() | self.project.watching_users
return q.exclude(pk__in=self.unwatching_users.all())
@property
def owner(self):
return self.author
def is_published(self):
return self.published
def get_current_stamp(self):
if self.current_stamp is not None:
return self.current_stamp
else:
return NewStampOfApproval(object=self,status='draft')
def has_access(self, user, levels):
return self.project.has_access(user, levels) or super().has_access(user,levels)
def can_be_viewed_by(self, user):
if self.item_type=='exam' and getattr(settings,'EXAM_ACCESS_REQUIRES_QUESTION_ACCESS',False):
for q in self.exam.questions.all():
if not q.editoritem.can_be_viewed_by(user):
return False
return super().can_be_viewed_by(user)
def can_be_copied_by(self, user):
if not super().can_be_copied_by(user):
return False
if self.can_be_edited_by(user):
return True
if self.licence:
return self.licence.can_reuse and self.licence.can_modify
else:
return True
def publish(self):
self.published = True
self.published_date = timezone.now()
def unpublish(self):
self.published = False
def set_licence(self, licence):
NumbasObject.get_parsed_content(self)
metadata = self.parsed_content.data.setdefault(u'metadata', {})
metadata['licence'] = licence.name if licence is not None else None
self.licence = licence
self.content = str(self.parsed_content)
def copy(self, author=None):
e2 = deepcopy(self)
e2.id = None
e2.share_uuid_view = uuid.uuid4()
e2.share_uuid_edit = uuid.uuid4()
e2.current_stamp = None
e2.published = False
e2.published_date = None
e2.copy_of = self
e2.folder = None
if author is not None:
e2.author = author
return e2
def get_absolute_url(self):
return self.rel_obj.get_absolute_url()
@property
def item_type(self):
if hasattr(self, 'exam'):
return 'exam'
elif hasattr(self, 'question'):
return 'question'
@property
def rel_obj(self):
""" the exam/question object corresponding to this item (to make contructing the URLs easier, mainly) """
if hasattr(self, 'exam'):
return self.exam
elif hasattr(self, 'question'):
return self.question
def as_numbasobject(self,request):
obj = self.exam if self.item_type=='exam' else self.question
numbasobj = obj.as_numbasobject(request)
return numbasobj
@property
def icon(self):
return self.rel_obj.icon
@property
def theme_path(self):
return self.rel_obj.theme_path
def edit_dict(self):
"""
Dictionary of information passed to edit view
"""
self.get_parsed_content()
return {
'id': self.rel_obj.id,
'editoritem_id': self.id,
'project_id': self.project.id,
'author': self.author_id,
'metadata': self.metadata,
'published': self.published,
'JSONContent': self.parsed_content.data,
'tags': [t.name for t in self.tags.all()],
'taxonomy_nodes': [n.pk for n in self.taxonomy_nodes.all()],
'ability_levels': [a.pk for a in self.ability_levels.all()],
}
@property
def filename(self):
return '{}-{}-{}'.format(self.item_type, self.pk, self.slug)
@property
def network(self):
    """All items in this item's copy-tree, sorted by creation date.

    Walks up the ``copy_of`` chain to the root ancestor, then collects
    every descendant of that root (including this item itself).
    """
    ei = self
    while ei.copy_of:
        ei = ei.copy_of
    return sorted(ei.descendants(), key=lambda x: x.created)

def descendants(self):
    """This item plus, recursively, every item copied from it."""
    return [self]+sum([ei2.descendants() for ei2 in self.copies.all()], [])

def summary(self, user=None):
    """A JSON-serialisable summary of this item for listings/APIs.

    :param user: optional User; when given, ``canEdit`` is included.
    """
    current_stamp = self.get_current_stamp()
    obj = {
        'editoritem_id': self.id,
        'name': self.name,
        'published': self.published,
        'metadata': self.metadata,
        'created': str(self.created),
        'last_modified': str(self.last_modified),
        'author': self.author.get_full_name(),
        'current_stamp': current_stamp.status,
        'current_stamp_display': current_stamp.get_status_display()
    }
    # NOTE(review): `variables` is assigned but never used here.
    variables = []
    # 'id' is the pk of the concrete exam/question object, not this item.
    if self.item_type == 'exam':
        obj['id'] = self.exam.id
    elif self.item_type == 'question':
        obj['id'] = self.question.id
    if user:
        obj['canEdit'] = self.can_be_edited_by(user)
    return obj
def merge(self, other):
    """Copy another item's content/metadata/tags/ability levels into this
    one, keeping this item's name, then delegate to the concrete
    exam/question ``merge`` and save.
    """
    oname = self.name
    self.content = other.content
    self.metadata = other.metadata
    # NOTE(review): set(*tags) unpacking matches older django-taggit;
    # newer versions expect a single iterable — confirm installed version.
    self.tags.set(*other.tags.all())
    self.ability_levels.clear()
    self.ability_levels.add(*other.ability_levels.all())
    self.set_name(oname)  # restore this item's own name after the copy
    self.rel_obj.merge(other.rel_obj)
    self.save()
@receiver(signals.post_save, sender=EditorItem)
def author_contributes_to_editoritem(instance, created, **kwargs):
    """On creation, record the author as a contributor to the item."""
    if created:
        Contributor.objects.get_or_create(item=instance, user=instance.author)

@receiver(signals.pre_save, sender=EditorItem)
def set_editoritem_name(instance, **kwargs):
    """Before saving, refresh the slug and resolve the licence named in the
    embedded content's metadata (None if absent or unknown)."""
    NumbasObject.get_parsed_content(instance)
    instance.slug = slugify(instance.name)
    if 'metadata' in instance.parsed_content.data:
        licence_name = instance.parsed_content.data['metadata'].get('licence', None)
    else:
        licence_name = None
    instance.licence = Licence.objects.filter(name=licence_name).first()

@receiver(signals.pre_save, sender=EditorItem)
def set_ability_level_limits(instance, **kwargs):
    """Cache the min start / max end of the item's ability levels.

    Skipped for unsaved instances, which can't have M2M rows yet.
    (aggregate() always returns both keys, so the .get defaults are
    only a safeguard.)
    """
    if instance.pk is None:
        return
    ends = instance.ability_levels.aggregate(Min('start'), Max('end'))
    instance.ability_level_start = ends.get('start__min', None)
    instance.ability_level_end = ends.get('end__max', None)

@receiver(signals.post_delete, sender=EditorItem)
def delete_notifications_for_item(instance, **kwargs):
    """Remove notifications targeting a deleted item (generic FK has no
    cascade, so this must be done explicitly)."""
    Notification.objects.filter(target_object_id=instance.pk, target_content_type=ContentType.objects.get_for_model(EditorItem)).delete()
class PullRequestManager(models.Manager):
    """Manager for PullRequest with a shortcut for open requests."""
    def open(self):
        """Only pull requests that have not yet been accepted/rejected."""
        return self.filter(open=True)
class PullRequest(models.Model, ControlledObject, TimelineMixin):
    """A request to merge the content of one EditorItem into another.

    While ``open`` is True the request is pending; accepting merges
    ``source`` into ``destination`` and closes it.
    """
    objects = PullRequestManager()

    # user who created this request
    owner = models.ForeignKey(User, related_name='pullrequests_created', on_delete=models.CASCADE)
    # user who accepted or rejected this request
    closed_by = models.ForeignKey(User, related_name='pullrequests_closed', null=True, blank=True, on_delete=models.SET_NULL)
    source = models.ForeignKey(EditorItem, related_name='outgoing_pull_requests', on_delete=models.CASCADE)
    destination = models.ForeignKey(EditorItem, related_name='incoming_pull_requests', on_delete=models.CASCADE)
    open = models.BooleanField(default=True)
    accepted = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    comment = models.TextField(blank=True)

    timelineitems = GenericRelation('TimelineItem', related_query_name='pull_requests', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/pull_request.html'

    @property
    def object(self):
        # TimelineMixin hook: the request appears on the destination's timeline.
        return self.destination

    def timeline_user(self):
        """The user shown for this event: the opener while open, otherwise
        whoever closed it."""
        if self.open:
            return self.owner
        else:
            return self.closed_by

    def has_access(self, user, accept_levels):
        # The owner always has access; otherwise defer to the destination.
        return self.destination.has_access(user, accept_levels) or user == self.owner

    def can_be_merged_by(self, user):
        return self.destination.can_be_edited_by(user)

    def can_be_deleted_by(self, user):
        return super().can_be_deleted_by(user) or self.destination.can_be_edited_by(user)

    def can_be_viewed_by(self, user):
        # Viewing requires sight of both ends of the request.
        return self.source.can_be_viewed_by(user) and self.destination.can_be_viewed_by(user)

    def clean(self):
        if self.source == self.destination:
            raise ValidationError({'source': "Source and destination are the same."})

    def validate_unique(self, exclude=None):
        """Forbid two simultaneous open requests between the same pair.

        Bug fix: exclude this instance itself from the duplicate check,
        otherwise re-validating an already-saved open pull request would
        always report a spurious duplicate.
        """
        existing = PullRequest.objects.filter(source=self.source, destination=self.destination, open=True)
        if self.pk is not None:
            existing = existing.exclude(pk=self.pk)
        if self.open and existing.exists():
            raise ValidationError("There's already an open pull request between these items.")

    def accept(self, user):
        """Merge the source into the destination and close the request."""
        self.accepted = True
        self.destination.merge(self.source)
        self.close(user)
        self.save()

    def reject(self, user):
        """Close the request without merging."""
        self.accepted = False
        self.close(user)
        self.save()

    def close(self, user):
        # Does not save; accept()/reject() call save() afterwards.
        self.open = False
        self.closed_by = user
class Timeline(object):
    """A queryset of TimelineItems filtered to what ``viewing_user`` may see.

    Visible items are: anything attached to a published item, non-sticky
    site broadcasts currently showing, and — for authenticated users —
    events on projects/items/queues they own, have individual access to,
    or watch. Items the user has explicitly hidden are excluded.
    """
    def __init__(self, items, viewing_user):
        self.viewing_user = viewing_user
        items = items.prefetch_related('object')
        nonsticky_broadcasts = SiteBroadcast.objects.visible_now().exclude(sticky=True)
        # Base visibility: published items and current non-sticky broadcasts.
        view_filter = Q(editoritems__published=True) | Q(object_content_type=ContentType.objects.get_for_model(SiteBroadcast), object_id__in=nonsticky_broadcasts)
        if not self.viewing_user.is_anonymous:
            # Projects the user owns, can access individually, or watches.
            projects = Project.objects.filter(Q(owner=self.viewing_user) | Q(pk__in=self.viewing_user.individual_accesses.for_model(Project).values('object_id')) | Q(watching_non_members=self.viewing_user)).values('pk')
            # Items the user authored or can access individually.
            editoritems = EditorItem.objects.filter(Q(author=self.viewing_user) | Q(pk__in=self.viewing_user.individual_accesses.for_model(EditorItem).values('object_id'))).values('pk')
            # Queues the user owns or can access individually.
            queues = ItemQueue.objects.filter(Q(owner=self.viewing_user) | Q(pk__in=self.viewing_user.individual_accesses.for_model(ItemQueue).values('object_id'))).values('pk')
            items_for_user = (
                Q(editoritems__in=editoritems) |
                Q(editoritems__project__in=projects) |
                Q(projects__in=projects) |
                Q(item_queue_entry__queue__project__in = projects) |
                Q(item_queue_entry__queue__in = queues) |
                Q(item_queue_entries__queue__project__in = projects) |
                Q(item_queue_entries__queue__in = queues)
            )
            view_filter = view_filter | items_for_user
        filtered_items = items.filter(view_filter)
        if not self.viewing_user.is_anonymous:
            # Respect per-user "hide this event" choices.
            filtered_items = filtered_items.exclude(hidden_by=self.viewing_user)
        self.filtered_items = filtered_items

    def __getitem__(self, index):
        # Support slicing/indexing straight through to the queryset.
        return self.filtered_items.__getitem__(index)
class TimelineItemManager(models.Manager):
    """Manager for TimelineItem."""
    def visible_to(self, user):
        """All items except those the given user has hidden."""
        objects = self.exclude(hidden_by=user)
        return objects
class TimelineItem(models.Model):
    """One event on a timeline.

    Links two generic objects: the ``timeline`` it belongs to (e.g. a
    project or queue) and the ``object`` representing the event itself
    (e.g. a Comment or stamp).
    """
    objects = TimelineItemManager()

    # Object whose timeline this item belongs to
    timeline_content_type = models.ForeignKey(ContentType, related_name='timelineitem_timeline', null=True, on_delete=models.CASCADE)
    timeline_id = models.PositiveIntegerField(null=True)
    timeline = GenericForeignKey('timeline_content_type', 'timeline_id')

    # Reference to an object representing this item (e.g. a Comment)
    object_content_type = models.ForeignKey(ContentType, related_name='timelineitem_object', on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    object = GenericForeignKey('object_content_type', 'object_id')

    user = models.ForeignKey(User, related_name='timelineitems', null=True, on_delete=models.CASCADE)
    # Users who chose to hide this event from their own timeline.
    hidden_by = models.ManyToManyField(User, related_name='hidden_timelineitems', blank=True)
    date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{}: {}'.format(self.date, str(self.object))

    def can_be_deleted_by(self, user):
        # Defer to the wrapped object; objects without the hook are undeletable.
        try:
            return self.object.can_be_deleted_by(user)
        except AttributeError:
            return False

    def can_be_viewed_by(self, user):
        return self.user == user or self.object.can_be_viewed_by(user)

    class Meta:
        # One timeline item per wrapped object.
        unique_together = (('object_id', 'object_content_type'),)
        ordering = ('-date',)
@receiver(signals.post_delete, sender=TimelineItem)
def delete_timelineitem_object(instance, *args, **kwargs):
    """When a timeline item is deleted, delete the wrapped event object too
    (generic FKs don't cascade automatically)."""
    if instance.object is not None:
        instance.object.delete()
class SiteBroadcastManager(models.Manager):
    """Manager for SiteBroadcast."""
    def visible_now(self):
        """Broadcasts whose expiry is in the future, or with no expiry."""
        return self.filter(Q(show_until__gte=timezone.now()) | Q(show_until=None))
class SiteBroadcast(models.Model, TimelineMixin):
    """A site-wide announcement shown to everyone, optionally until a date."""
    objects = SiteBroadcastManager()
    author = models.ForeignKey(User, related_name='site_broadcasts', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Sticky broadcasts are pinned rather than shown in the timeline feed.
    sticky = models.BooleanField(default=False)
    show_until = models.DateTimeField(null=True, blank=True)
    timelineitems = GenericRelation(TimelineItem, related_query_name='site_broadcasts', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/site_broadcast.html'

    def can_be_deleted_by(self, user):
        # Broadcasts are never user-deletable through the timeline.
        return False

    def can_be_viewed_by(self, user):
        # Broadcasts are public.
        return True

    def timeline_object(self):
        # Not attached to any particular object's timeline.
        return None

    def __str__(self):
        return self.text[:50]
class Tip(models.Model):
    """A usage tip shown to users, optionally with a link and a
    demonstration question/exam."""
    title = models.CharField(max_length=500)
    text = models.TextField()
    link = models.URLField(blank=True, null=True, verbose_name='Link to more information')
    link_text = models.CharField(blank=True, null=True, max_length=200)
    editoritem = models.ForeignKey(EditorItem, related_name='used_in_tips', blank=True, null=True, on_delete=models.SET_NULL, verbose_name='A question or exam demonstrating the tip')

    def __str__(self):
        return self.title

    def __repr__(self):
        return 'Tip "{}"'.format(self.title)
class NewStampOfApproval(models.Model, TimelineMixin):
    """A feedback stamp (e.g. "ready to use") given by a user to an item."""
    object = models.ForeignKey(EditorItem, related_name='stamps', on_delete=models.CASCADE)
    timelineitems = GenericRelation(TimelineItem, related_query_name='stamps', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/stamp.html'
    user = models.ForeignKey(User, related_name='newstamps', on_delete=models.CASCADE)
    status = models.CharField(choices=STAMP_STATUS_CHOICES, max_length=20)
    date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{} said "{}"'.format(self.user.username, self.get_status_display())

    def can_be_viewed_by(self, user):
        # Visible iff the stamped item is visible.
        return self.object.can_be_viewed_by(user)

    class Meta:
        ordering = ('-date',)
@receiver(signals.post_save, sender=NewStampOfApproval)
@receiver(signals.post_delete, sender=NewStampOfApproval)
def set_current_stamp(instance, **kwargs):
    """Keep the item's cached ``current_stamp`` pointing at its most recent
    stamp. Registered for delete too, so removing the latest stamp falls
    back to the next most recent (or None)."""
    instance.object.current_stamp = NewStampOfApproval.objects.filter(object=instance.object).order_by('-date').first()
    instance.object.save()

@receiver(signals.post_save, sender=NewStampOfApproval)
def notify_stamp(instance, **kwargs):
    """Notify watchers of the item whenever feedback is given."""
    notify_watching(instance.user, target=instance.object, verb='gave feedback on', action_object=instance)
class Comment(models.Model, TimelineMixin):
    """A user comment attached (via generic FK) to any commentable object."""
    object_content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    object = GenericForeignKey('object_content_type', 'object_id')
    timelineitems = GenericRelation(TimelineItem, related_query_name='comments', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/comment.html'
    user = models.ForeignKey(User, related_name='comments', on_delete=models.CASCADE)
    text = models.TextField()

    def __str__(self):
        # Truncate long comment text for display (50-char budget incl. ellipsis).
        return 'Comment by {} on {}: "{}"'.format(self.user.get_full_name(), str(self.object), self.text[:47]+'...' if len(self.text) > 50 else self.text)

    def can_be_viewed_by(self, user):
        # Visible iff the commented-on object is visible.
        return self.object.can_be_viewed_by(user)
@receiver(signals.post_save, sender=Comment)
def notify_comment(instance, **kwargs):
    """Notify watchers of the commented-on object when a comment is saved.

    NOTE(review): a later receiver in this module reuses the name
    ``notify_comment`` (for ItemQueueEntry), rebinding the module-level
    name; this signal connection still works because @receiver keeps its
    own reference, but the duplicate name is worth fixing.
    """
    notify_watching(instance.user, target=instance.object, verb='commented on', action_object=instance)
class RestorePoint(models.Model, TimelineMixin):
    """A named marker on an item's reversion history that the item can be
    rolled back to."""
    object = models.ForeignKey(EditorItem, related_name='restore_points', on_delete=models.CASCADE)
    timelineitems = GenericRelation(TimelineItem, related_query_name='restore_points', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/restore_point.html'
    user = models.ForeignKey(User, related_name='restore_points', on_delete=models.CASCADE)
    description = models.TextField()
    # The django-reversion revision this point refers to.
    revision = models.ForeignKey(reversion.models.Revision, on_delete=models.CASCADE)

    def __str__(self):
        return 'Restore point set by {} on {}: "{}"'.format(self.user.get_full_name(), str(self.object), self.description[:47]+'...' if len(self.description) > 50 else self.description)

    def can_be_viewed_by(self, user):
        return self.object.can_be_viewed_by(user)
# Choices for ItemChangedTimelineItem.verb.
ITEM_CHANGED_VERBS = [('created', 'created')]

class ItemChangedTimelineItem(models.Model, TimelineMixin):
    """A timeline event recording a lifecycle change to an item."""
    object = models.ForeignKey(EditorItem, on_delete=models.CASCADE)
    verb = models.CharField(choices=ITEM_CHANGED_VERBS, editable=False, max_length=10)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='item_changed_timelineitems')
    timelineitems = GenericRelation(TimelineItem, related_query_name='item_changes', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/change.html'

    def can_be_viewed_by(self, user):
        return self.object.can_be_viewed_by(user)

    def can_be_deleted_by(self, user):
        # Lifecycle events are permanent.
        return False

    def icon(self):
        # NOTE(review): maps more verbs than ITEM_CHANGED_VERBS declares;
        # 'deleted'/'published' appear to be reserved for future use.
        return {
            'created': 'plus',
            'deleted': 'remove',
            'published': 'globe',
        }[self.verb]

    def __str__(self):
        return '{} {} {}'.format(self.user.get_full_name(), self.verb, str(self.object))
@receiver(signals.post_save)
def create_timelineitem(sender, instance, created, **kwargs):
    """Create a TimelineItem for any newly-saved TimelineMixin model.

    Registered without a sender, so it fires for every model save and
    filters by the mixin. The try/except covers ``instance.user`` being
    None or absent (AttributeError); note a stale user FK would raise
    User.DoesNotExist, which is NOT caught here.
    """
    if not issubclass(sender, TimelineMixin):
        return
    if created:
        try:
            user = User.objects.get(pk=instance.user.pk)
        except AttributeError:
            user = None
        TimelineItem.objects.create(object=instance, timeline=instance.timeline_object(), user=user)
@reversion.register
class NewQuestion(models.Model):
    """A single question: an EditorItem plus question-specific relations
    (resources, extensions, custom part types)."""
    editoritem = models.OneToOneField(EditorItem, on_delete=models.CASCADE, related_name='question')
    resources = models.ManyToManyField(Resource, blank=True, related_name='questions')
    extensions = models.ManyToManyField(Extension, blank=True, related_name='questions')
    custom_part_types = models.ManyToManyField(CustomPartType, blank=True, related_name='questions')

    # Questions are always previewed with the built-in 'question' theme.
    theme_path = os.path.join(settings.GLOBAL_SETTINGS['NUMBAS_PATH'], 'themes', 'question')
    icon = 'file'

    class Meta:
        verbose_name = 'question'
        ordering = ['editoritem__name']
        permissions = (
            ('highlight', 'Can pick questions to feature on the front page.'),
        )

    def __str__(self):
        return self.editoritem.name

    def __unicode__(self):
        # Python 2 leftover; harmless under Python 3.
        return self.editoritem.name

    def get_absolute_url(self):
        return reverse('question_edit', args=(self.pk, self.editoritem.slug))

    @property
    def resource_paths(self):
        # (name, absolute filesystem path) pairs for each attached resource.
        return [(r.file.name, r.file.path) for r in self.resources.all()]

    def as_numbasobject(self, request):
        """Build a compilable exam-shaped NumbasObject wrapping this single
        question, including extensions pulled in by custom part types."""
        self.editoritem.get_parsed_content()
        contributor_data = [c.as_json(request) for c in self.editoritem.contributors.all()]
        question_data = self.editoritem.parsed_content.data
        question_data['contributors'] = contributor_data
        # Extensions used directly, plus those required by custom part types.
        extensions = list(self.extensions.all())
        for cpt in self.custom_part_types.all():
            extensions += list(cpt.extensions.all())
        extensions = set(extensions)
        data = OrderedDict([
            ('name', self.editoritem.name),
            ('extensions', [e.location for e in extensions]),
            ('custom_part_types', [p.as_json() for p in self.custom_part_types.all()]),
            ('resources', self.resource_paths),
            ('navigation', {'allowregen': True, 'showfrontpage': False, 'preventleave': False}),
            ('question_groups', [{'pickingStrategy':'all-ordered', 'questions':[question_data]}]),
        ])
        data['contributors'] = contributor_data
        obj = numbasobject.NumbasObject(data=data, version=self.editoritem.parsed_content.version)
        return obj

    def edit_dict(self):
        """The EditorItem edit dict plus question-specific keys."""
        d = self.editoritem.edit_dict()
        d['extensions'] = [e.location for e in self.extensions.all()]
        d['resources'] = [res.as_json() for res in self.resources.all()]
        return d

    def summary(self, user=None):
        """The EditorItem summary plus URLs and overridable variables."""
        obj = self.editoritem.summary(user)
        obj['url'] = reverse('question_edit', args=(self.pk, self.editoritem.slug,))
        obj['deleteURL'] = reverse('question_delete', args=(self.pk, self.editoritem.slug))
        content = self.editoritem.get_parsed_content()
        variables = content.data.get('variables',{})
        # Only variables flagged as overridable are exposed.
        tvariables = {k:v for k,v in variables.items() if v.get('can_override')}
        obj['variables'] = tvariables
        return obj

    @property
    def exams_using_this(self):
        return self.exams.distinct()

    def copy(self, author=None):
        """Create and save a copy of this question (and its EditorItem).

        custom_part_types are not copied explicitly: the post_save
        receiver recomputes them from the content.
        """
        q2 = deepcopy(self)
        q2.id = None
        ei2 = self.editoritem.copy(author)
        ei2.save()
        q2.editoritem = ei2
        q2.save()
        q2.resources.set(self.resources.all())
        q2.extensions.set(self.extensions.all())
        q2.save()
        return q2

    def merge(self, other):
        """Replace this question's resources/extensions with the other's."""
        self.resources.clear()
        self.resources.add(*other.resources.all())
        self.extensions.clear()
        self.extensions.add(*other.extensions.all())
        self.save()
@receiver(signals.post_save, sender=NewQuestion)
def set_question_custom_part_types(instance, **kwargs):
    """Recompute the question's custom_part_types from its content.

    Scans every part, step and gap for its 'type' string and links any
    matching CustomPartType by short_name; built-in part types simply
    match nothing.
    """
    q = instance
    c = NumbasObject.get_parsed_content(q.editoritem)
    parts = c.data.get('parts',[])
    all_parts = parts[:]
    for p in parts:
        all_parts += [s for s in p.get('steps',[])] + [g for g in p.get('gaps',[])]
    part_types = set(p['type'] for p in all_parts)
    q.custom_part_types.clear()
    custom_part_types = CustomPartType.objects.filter(short_name__in=part_types)
    q.custom_part_types.add(*custom_part_types)
@reversion.register
class NewExam(models.Model):
    """An exam: an EditorItem plus grouped questions and theme/locale."""
    editoritem = models.OneToOneField(EditorItem, on_delete=models.CASCADE, related_name='exam')
    questions = models.ManyToManyField(NewQuestion, through='NewExamQuestion', blank=True, editable=False, related_name='exams')

    theme = models.CharField(max_length=200, default='default', blank=True) # used if custom_theme is None
    custom_theme = models.ForeignKey(Theme, null=True, blank=True, on_delete=models.SET_NULL, related_name='used_in_newexams')
    locale = models.CharField(max_length=200, default='en-GB')

    icon = 'book'

    class Meta:
        verbose_name = 'exam'

    def __str__(self):
        return self.editoritem.name

    def __unicode__(self):
        # Python 2 leftover; harmless under Python 3.
        return self.editoritem.name

    def get_absolute_url(self):
        return reverse('exam_edit', args=(self.pk, self.editoritem.slug))

    @property
    def resources(self):
        # Resources belonging to any question in this exam.
        return Resource.objects.filter(questions__in=self.questions.all()).distinct()

    @property
    def resource_paths(self):
        return [(r.file.name, r.file.path) for r in self.resources.all()]

    @property
    def theme_path(self):
        """Filesystem path of the theme: a custom uploaded theme if set,
        otherwise a built-in theme by name."""
        if self.custom_theme:
            return self.custom_theme.extracted_path
        else:
            return os.path.join(settings.GLOBAL_SETTINGS['NUMBAS_PATH'], 'themes', self.theme)

    def as_numbasobject(self, request):
        """Build a compilable NumbasObject for this exam, embedding the
        full content of every question in each group."""
        obj = numbasobject.NumbasObject(self.editoritem.content)
        data = obj.data
        question_groups = self.question_groups
        data['contributors'] = [c.as_json(request) for c in self.editoritem.contributors.all()]
        data['extensions'] = [e.location for e in self.extensions]
        data['custom_part_types'] = [p.as_json() for p in self.custom_part_types]
        data['name'] = self.editoritem.name
        if 'question_groups' not in data:
            data['question_groups'] = self.question_groups_dict()
        for i, g in enumerate(data['question_groups']):
            # Groups beyond those with assigned questions come out empty.
            if i < len(question_groups):
                questions = question_groups[i]
            else:
                questions = []
            def question_object(q):
                # Inline the question's own content, dropping its nested groups.
                data = q.editoritem.as_numbasobject(request).data
                del data['question_groups']
                data.update(q.editoritem.parsed_content.data)
                return data
            g['questions'] = [question_object(q) for q in questions]
        data['resources'] = self.resource_paths
        return obj

    def edit_dict(self):
        """
        Dictionary of information passed to the update view.
        """
        exam_dict = self.editoritem.edit_dict()
        exam_dict['locale'] = self.locale
        exam_dict['custom_theme'] = self.custom_theme_id
        exam_dict['theme'] = self.theme
        exam_dict['question_groups'] = self.question_groups_dict()
        return exam_dict

    def question_groups_dict(self):
        """Question summaries grouped by group number (requires the
        pre-sorted order_by for itertools.groupby to work)."""
        groups = groupby(self.newexamquestion_set.order_by('group', 'qn_order'), key=lambda q: q.group)
        return [{'group':group, 'questions':[q.question.summary() for q in qs]} for group, qs in groups]

    @property
    def question_groups(self):
        """List of lists of NewQuestion, indexed by group number; gaps in
        group numbering come out as empty lists."""
        groups = []
        for eq in self.newexamquestion_set.all():
            while len(groups) < eq.group+1:
                groups.append([])
            groups[eq.group].append(eq.question)
        return groups

    @property
    def extensions(self):
        # Extensions needed by any question in this exam.
        return Extension.objects.filter(questions__in=self.questions.all()).distinct()

    @property
    def custom_part_types(self):
        return CustomPartType.objects.filter(questions__in=self.questions.all()).distinct()

    def set_question_groups(self, question_groups):
        """Replace all question assignments; ``question_groups`` is a list
        of lists of question pks, one inner list per group."""
        with transaction.atomic():
            self.questions.clear()
            for group_number, group in enumerate(question_groups):
                for order, pk in enumerate(group):
                    exam_question = NewExamQuestion(exam=self, question=NewQuestion.objects.get(pk=pk), qn_order=order, group=group_number)
                    exam_question.save()

    def copy(self, author=None):
        """Create and save a copy of this exam, its EditorItem and its
        question assignments (questions themselves are shared)."""
        e2 = deepcopy(self)
        e2.id = None
        ei2 = self.editoritem.copy(author)
        ei2.save()
        e2.editoritem = ei2
        e2.save()
        for eq in NewExamQuestion.objects.filter(exam=self):
            NewExamQuestion.objects.create(exam=e2, question=eq.question, qn_order=eq.qn_order, group=eq.group)
        e2.custom_theme = self.custom_theme
        e2.save()
        return e2

    def merge(self, other):
        """Copy the other exam's question assignments and theme/locale
        settings onto this exam."""
        with transaction.atomic():
            for eq in other.newexamquestion_set.all():
                exam_question = NewExamQuestion(exam=self, question=eq.question, qn_order=eq.qn_order, group=eq.group)
                exam_question.save()
            self.theme = other.theme
            self.custom_theme = other.custom_theme
            self.locale = other.locale
            self.save()
class NewExamQuestion(models.Model):
    """
    Through model for a question belonging to an exam.

    Specifies the position (group and order within the group) the
    question should appear in.
    """
    class Meta:
        ordering = ['qn_order']

    exam = models.ForeignKey(NewExam, on_delete=models.CASCADE)
    question = models.ForeignKey(NewQuestion, on_delete=models.CASCADE)
    qn_order = models.PositiveIntegerField()
    group = models.PositiveIntegerField(default=0)
@receiver(signals.post_save, sender=NewQuestion)
@receiver(signals.post_save, sender=NewExam)
def item_created_timeline_event(instance, created, **kwargs):
    """Record a 'created' timeline event when a question or exam is first
    saved."""
    if created:
        ItemChangedTimelineItem.objects.create(user=instance.editoritem.author, object=instance.editoritem, verb='created')
class ItemQueueManager(models.Manager):
    """Manager for ItemQueue."""
    def visible_to(self, user):
        """Queues the given user is allowed to see."""
        return self.filter(ItemQueue.filter_can_be_viewed_by(user))
class ItemQueue(models.Model, ControlledObject):
    """A review queue belonging to a project, to which items can be
    submitted and checked against a checklist."""
    objects = ItemQueueManager()
    owner = models.ForeignKey(User, null=True, on_delete=models.SET_NULL, related_name='own_queues')
    name = models.CharField(max_length=200)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='queues')
    description = models.TextField(blank=True)
    instructions_submitter = models.TextField(blank=True, verbose_name='Instructions for submitters')
    instructions_reviewer = models.TextField(blank=True, verbose_name='Instructions for reviewers')
    public = models.BooleanField(default=False, verbose_name='Visible to everyone?')
    access = GenericRelation('IndividualAccess', related_query_name='item_queue', content_type_field='object_content_type', object_id_field='object_id')

    timeline_noun = 'queue'
    icon = 'list'

    def __str__(self):
        return self.name

    def is_published(self):
        return self.public

    def can_be_viewed_by(self, user):
        # Anyone who can see the owning project can see the queue.
        return super().can_be_viewed_by(user) or self.project.can_be_viewed_by(user)

    @classmethod
    def filter_can_be_viewed_by(cls, user):
        """A Q object selecting queues visible to ``user``: public queues,
        owned queues, individually-granted access, or queues in a
        visible project. Superusers and EVERYTHING_VISIBLE see all.
        """
        if getattr(settings, 'EVERYTHING_VISIBLE', False):
            return Q()
        view_perms = ('edit', 'view')
        # superuser_sees_everything is presumably a ControlledObject flag — confirm.
        if cls.superuser_sees_everything and user.is_superuser:
            return Q()
        elif user.is_anonymous:
            return Q(public=True)
        else:
            return (Q(pk__in=user.individual_accesses.for_model(cls).filter(access__in=view_perms).values('object_id'))
                    | Q(public=True)
                    | Q(owner=user)
                    | Q(project__in=Project.objects.filter(Project.filter_can_be_viewed_by(user)))
                   )

    @property
    def watching_users(self):
        # The owner plus anyone with individual access to the queue.
        query = Q(pk=self.owner.pk) | Q(individual_accesses__item_queue=self)
        return User.objects.filter(query).distinct()

    def get_absolute_url(self):
        return reverse('queue_view', args=(self.pk,))
class ItemQueueChecklistItem(models.Model):
    """One entry on a queue's review checklist."""
    queue = models.ForeignKey(ItemQueue, on_delete=models.CASCADE, related_name='checklist')
    label = models.CharField(max_length=500)

    def as_json(self):
        """JSON-serialisable representation for the queue editing UI."""
        return {
            'pk': self.pk,
            'queue': self.queue.pk,
            'label': self.label,
        }
class ItemQueueEntryManager(models.Manager):
    """Manager for ItemQueueEntry."""
    def incomplete(self):
        """Entries whose review has not yet been marked complete."""
        return self.filter(complete=False)
class ItemQueueEntry(models.Model, ControlledObject, TimelineMixin):
    """An item submitted to an ItemQueue for review against its checklist."""
    objects = ItemQueueEntryManager()

    icon = 'list'

    class Meta:
        ordering = ['-pk']

    def __str__(self):
        return f'"{self.item.name}" in the queue "{self.queue.name}"'

    def get_absolute_url(self):
        return reverse('queue_entry_review', args=(self.pk,))

    queue = models.ForeignKey(ItemQueue, on_delete=models.CASCADE, related_name='entries')
    item = models.ForeignKey(EditorItem, on_delete=models.CASCADE, related_name='queue_entries')
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='queue_entries')
    note = models.TextField(blank=True)
    complete = models.BooleanField(default=False)
    comments = GenericRelation('Comment', content_type_field='object_content_type', object_id_field='object_id', related_query_name='item_queue_entry')
    timeline = GenericRelation('TimelineItem', related_query_name='item_queue_entry', content_type_field='timeline_content_type', object_id_field='timeline_id')
    timelineitems = GenericRelation(TimelineItem, related_query_name='item_queue_entries', content_type_field='object_content_type', object_id_field='object_id')
    timelineitem_template = 'timeline/item_queue_entry.html'

    def timeline_object(self):
        # Entries appear on their queue's timeline.
        return self.queue

    def checklist_items(self):
        """The queue's checklist, annotated with whether each item has been
        ticked for this entry."""
        return self.queue.checklist.all().annotate(ticked=Exists(ItemQueueChecklistTick.objects.filter(item=OuterRef('pk'), entry=self)))

    @property
    def name(self):
        return "Review of \"{}\" in \"{}\"".format(self.item.name, self.queue.name)

    def is_published(self):
        # Bug fix: this model has no `public` field of its own; visibility
        # follows the owning queue.
        return self.queue.public

    @property
    def owner(self):
        return self.created_by

    def can_be_edited_by(self, user):
        return self.queue.can_be_edited_by(user)

    def can_be_viewed_by(self, user):
        # Bug fix: this model has no `project` attribute; go via the queue's
        # project (previously `self.project`, which would raise
        # AttributeError whenever the queue check was False).
        return self.queue.can_be_viewed_by(user) or self.queue.project.can_be_viewed_by(user)

    def progress(self):
        """The fraction of the queue's checklist ticked for this entry.

        Returns 0 for a queue with an empty checklist (previously this
        raised ZeroDivisionError).
        """
        total_items = self.queue.checklist.count()
        if total_items == 0:
            return 0
        ticked_items = self.queue.checklist.filter(ticks__entry=self).distinct().count()
        return ticked_items / total_items

    @property
    def watching_users(self):
        # The submitter plus anyone who has commented on this entry.
        query = Q(pk=self.created_by.pk) | Q(comments__item_queue_entry=self)
        return User.objects.filter(query).distinct()
@receiver(signals.post_save, sender=ItemQueueEntry)
def notify_comment(instance, created, **kwargs):
    """Notify queue watchers when a new entry is submitted.

    NOTE(review): this reuses the name of the Comment post_save receiver
    defined earlier in this module, rebinding the module-level name. Both
    signal connections still work (each @receiver keeps its own
    reference), but renaming this to something like
    ``notify_queue_entry_submitted`` would avoid the shadowing.
    """
    if created:
        notify_watching(instance.created_by, target=instance.queue, verb='submitted an item to', action_object=instance)
class ItemQueueChecklistTick(models.Model):
    """A record that a checklist item has been ticked for a queue entry,
    with who ticked it and when."""
    entry = models.ForeignKey(ItemQueueEntry, on_delete=models.CASCADE, related_name='ticks')
    item = models.ForeignKey(ItemQueueChecklistItem, on_delete=models.CASCADE, related_name='ticks')
    date = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL, related_name='queue_entry_ticks')
| 37.061605
| 219
| 0.665069
|
4a08b2f8d321ea85ac61a9a670d41e091102e866
| 430
|
py
|
Python
|
blender/arm/logicnode/physics/LN_get_world_gravity.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/physics/LN_get_world_gravity.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/physics/LN_get_world_gravity.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
from arm.logicnode.arm_nodes import *
class GetGravityNode(ArmLogicTreeNode):
    """Returns the world gravity.

    @seeNode Set Gravity
    """
    bl_idname = 'LNGetGravityNode'
    bl_label = 'Get World Gravity'
    # Version of this node definition, used by Armory's node-upgrade system.
    arm_version = 1

    def init(self, context):
        # Register the node, then declare its single vector output socket.
        super(GetGravityNode, self).init(context)
        self.add_output('NodeSocketVector', 'World Gravity')

# Register this node under the default package category.
add_node(GetGravityNode, category=PKG_AS_CATEGORY)
| 25.294118
| 60
| 0.706977
|
4a08b34202d03eb01b48f82f71abb3c6feda6456
| 389
|
py
|
Python
|
src/softfab/initlog.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | 20
|
2019-02-07T17:03:04.000Z
|
2020-03-16T20:45:19.000Z
|
src/softfab/initlog.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | 36
|
2019-02-11T08:57:16.000Z
|
2020-09-29T05:32:08.000Z
|
src/softfab/initlog.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
from pathlib import Path
import logging
from softfab.version import VERSION
def initLogging(dbDir: Path) -> None:
    """Configure root logging to append to ``cc-log.txt`` inside `dbDir`
    and record a startup line with the running version.

    Calls logging.basicConfig, so this has a global effect and is a
    no-op if the root logger is already configured; call it once at
    startup. Assumes `dbDir` already exists — TODO confirm.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='* %(asctime)s %(levelname)-8s> %(message)s',
        filename=dbDir / 'cc-log.txt'
        )
    logging.info('> > Control Center startup, version %s', VERSION)
| 22.882353
| 67
| 0.660668
|
4a08b348f55b972b4926c044e4065c5c61d9041b
| 1,308
|
py
|
Python
|
AML/HW2/hw2_program/lda_use.py
|
ZRZ-Unknow/20fall-CourseNote
|
e20735fd1ca0949eaa1c50d5cd84f147ec714404
|
[
"MIT"
] | null | null | null |
AML/HW2/hw2_program/lda_use.py
|
ZRZ-Unknow/20fall-CourseNote
|
e20735fd1ca0949eaa1c50d5cd84f147ec714404
|
[
"MIT"
] | null | null | null |
AML/HW2/hw2_program/lda_use.py
|
ZRZ-Unknow/20fall-CourseNote
|
e20735fd1ca0949eaa1c50d5cd84f147ec714404
|
[
"MIT"
] | null | null | null |
import numpy as np
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from utils import load_data
import argparse
def main(args):
    """Fit an LDA topic model over the corpus and write the top words per
    topic (with normalised weights) to a results file under ./assets/.

    :param args: argparse namespace with seed, topic_nums, top_k_words
        and max_iter_nums.
    """
    np.random.seed(args.seed)
    data = load_data()
    # Term-frequency matrix: drop near-ubiquitous (>95% docs) and rare (<2 docs) terms.
    tf_vec = CountVectorizer(max_df=0.95, min_df=2, max_features=1500, stop_words='english')
    tf = tf_vec.fit_transform(data)
    topic_nums = args.topic_nums
    top_k_words = args.top_k_words
    max_iter_nums = args.max_iter_nums
    lda = LatentDirichletAllocation(n_components=topic_nums, max_iter=max_iter_nums, learning_method='batch')
    lda.fit(tf)
    tf_feature = tf_vec.get_feature_names()
    # NOTE(review): assumes ./assets/ exists; get_feature_names() is removed
    # in newer scikit-learn (get_feature_names_out) — confirm version.
    with open(f'./assets/results_use_topic{topic_nums}_iter{max_iter_nums}.txt', 'w') as f:
        for topic_idx, topic in enumerate(lda.components_):
            f.write(f'Topic {topic_idx}:\n')
            # argsort ascending, so take the last top_k_words in reverse.
            for i in topic.argsort()[:-top_k_words-1:-1]:
                f.write(f' {tf_feature[i]} {topic[i]/sum(topic)}\n')

if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters and run.
    p = argparse.ArgumentParser()
    p.add_argument('--seed', default=0, type=int)
    p.add_argument('--topic_nums', default=5, type=int)
    p.add_argument('--top_k_words', default=10, type=int)
    p.add_argument('--max_iter_nums', default=1000, type=int)
    args = p.parse_args()
    main(args)
| 39.636364
| 107
| 0.715596
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.