hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4769b77a6b07ea94825a12d364fc9b88d2123602 | 1,494 | py | Python | python_bindings/setup.py | mikeseven/Halide | 739513cc290d0351b69cedd31829032788bc3a7e | [
"MIT"
] | 1 | 2016-10-08T00:18:46.000Z | 2016-10-08T00:18:46.000Z | python_bindings/setup.py | mikeseven/Halide | 739513cc290d0351b69cedd31829032788bc3a7e | [
"MIT"
] | null | null | null | python_bindings/setup.py | mikeseven/Halide | 739513cc290d0351b69cedd31829032788bc3a7e | [
"MIT"
] | null | null | null | from distutils.core import setup
from distutils.extension import Extension
import os, os.path, sys
import glob
import subprocess
import shutil
build_prefix = os.getenv('BUILD_PREFIX')
if not build_prefix:
build_prefix = ''
halide_root = '..'
include_path = os.path.join(halide_root, 'include')
bin_path = os.path.join(halide_root, 'bin', build_prefix)
image_path = os.path.join(halide_root, 'apps', 'images')
png_cflags = subprocess.check_output('libpng-config --cflags', shell=True).strip()
png_ldflags = subprocess.check_output('libpng-config --ldflags', shell=True).strip()
ext_modules = [Extension("halide/_cHalide", ["halide/cHalide_wrap.cxx", 'halide/py_util.cpp'],
include_dirs=[include_path],
extra_compile_args=('-ffast-math -O3 -msse -Wl,-dead_strip -fno-common' + ' ' + png_cflags).split(),
extra_link_args=[os.path.join(bin_path, 'libHalide.a'), '-lpthread', '-ldl', '-lstdc++', '-lc']+png_ldflags.split(),
language='c++')]
if glob.glob('halide/data/*.png') == []:
shutil.copytree(image_path, 'halide/data')
setup(
name = 'halide',
version = '0.2',
author = 'Connelly Barnes',
license = 'MIT',
classifiers=[
"Topic :: Multimedia :: Graphics",
"Programming Language :: Python :: 2.7"],
packages=['halide'],
package_dir={'halide': 'halide'},
package_data={'halide': ['data/*.png']},
ext_modules = ext_modules
)
| 34.744186 | 141 | 0.637885 |
f64f11ab9e5ee65f90f42b7d7c618896508e115c | 4,389 | py | Python | pollbot/telegram/callback_handler/__init__.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 112 | 2019-06-11T17:52:57.000Z | 2022-03-18T00:05:21.000Z | pollbot/telegram/callback_handler/__init__.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 91 | 2019-05-28T11:33:40.000Z | 2022-02-27T12:12:07.000Z | pollbot/telegram/callback_handler/__init__.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 69 | 2019-07-10T16:58:06.000Z | 2022-03-30T22:09:44.000Z | """Callback query handling."""
from datetime import date
from sqlalchemy.exc import IntegrityError
from pollbot.enums import CallbackType
from pollbot.helper.stats import increase_stat, increase_user_stat
from pollbot.models import Option, UserStatistic
from pollbot.telegram.callback_handler.context import CallbackContext # noqa
from pollbot.telegram.session import callback_query_wrapper
from .context import get_context
from .mapping import async_callback_mapping, callback_mapping
from .vote import handle_vote
@callback_query_wrapper
def handle_callback_query(bot, update, session, user):
"""Handle synchronous callback queries.
Some critical callbacks shouldn't be allowed to be asynchronous,
since they tend to cause race conditions and integrity errors in the database
schema. That's why some calls are restricted to be synchronous.
"""
context = get_context(bot, update, session, user)
increase_user_stat(session, context.user, "callback_calls")
session.commit()
response = callback_mapping[context.callback_type](session, context)
# Callback handler functions always return the callback answer
# The only exception is the vote function, which is way too complicated and
# implements its own callback query answer logic.
if response is not None and context.callback_type != CallbackType.vote:
context.query.answer(response)
else:
context.query.answer("")
increase_stat(session, "callback_calls")
return
@callback_query_wrapper
def handle_async_callback_query(bot, update, session, user):
"""Handle asynchronous callback queries.
Most callback queries are unproblematic in terms of causing race-conditions.
Thereby they can be handled asynchronously.
However, we do handle votes asynchronously as an edge-case, since we want those
calls to be handled as fast as possible.
The race condition handling for votes is handled in the respective `handle_vote` function.
"""
context = get_context(bot, update, session, user)
# Vote logic needs some special handling
if context.callback_type == CallbackType.vote:
option = session.query(Option).get(context.payload)
if option is None:
return
poll = option.poll
# Ensure user statistics exist for this poll owner
# We need to track at least some user activity, since there seem to be some users which
# abuse the bot by creating polls and spamming up to 1 million votes per day.
#
# I really hate doing this, but I don't see another way to prevent DOS attacks
# without tracking at least some numbers.
user_statistic = session.query(UserStatistic).get((date.today(), poll.user.id))
if user_statistic is None:
user_statistic = UserStatistic(poll.user)
session.add(user_statistic)
try:
session.commit()
# Handle race condition for parallel user statistic creation
# Return the statistic that has already been created in another session
except IntegrityError as e:
session.rollback()
user_statistic = session.query(UserStatistic).get(
(date.today(), poll.user.id)
)
if user_statistic is None:
raise e
# Increase stats before we do the voting logic
# Otherwise the user might dos the bot by triggering flood exceptions
# before actually being able to increase the stats
increase_user_stat(session, context.user, "votes")
increase_user_stat(session, poll.user, "poll_callback_calls")
session.commit()
response = handle_vote(session, context, option)
else:
increase_user_stat(session, context.user, "callback_calls")
session.commit()
response = async_callback_mapping[context.callback_type](session, context)
# Callback handler functions always return the callback answer
# The only exception is the vote function, which is way too complicated and
# implements its own callback query answer logic.
if response is not None and context.callback_type != CallbackType.vote:
context.query.answer(response)
else:
context.query.answer("")
increase_stat(session, "callback_calls")
return
| 38.5 | 95 | 0.706083 |
ab65c3d6afb0c23fbf28a9ab647cadfe9273a44c | 1,565 | py | Python | fission/np-examp.py | qianl15/serverless-zoo | 5cf34492a9186e636fbc6ad7e603a01db55a54ca | [
"MIT"
] | null | null | null | fission/np-examp.py | qianl15/serverless-zoo | 5cf34492a9186e636fbc6ad7e603a01db55a54ca | [
"MIT"
] | null | null | null | fission/np-examp.py | qianl15/serverless-zoo | 5cf34492a9186e636fbc6ad7e603a01db55a54ca | [
"MIT"
] | null | null | null | import sys
from flask import request
from flask import current_app
import numpy as np
from google.cloud import storage
# Python env doesn't pass in an argument
# we can get request data by request.headers, request.get_data()
def main():
current_app.logger.info("Received request")
msg = '---HEADERS---\n{}\n--BODY--\n{}\n-----\n'.format(request.headers, request.get_data())
x = np.array(([1,2,3],[4,5,6],[7,8,9]))
y = np.array(([.1,.2,.3],[.4,.5,.6],[.7,.8,.9]))
z = np.matmul(x,y)
zz = np.array2string(z)
fd = open('npresult.txt', 'w')
fd.write(zz + '\n')
fd.close()
fin_res = ''
client = storage.Client()
bucket_fail = False
try:
bucket = client.get_bucket('fission-bucket')
except:
bucket_fail = True
print("Checking bucket...")
if not bucket_fail:
print("Bucket didn't fail")
# Upload file
blobul = bucket.blob('npres.txt')
blobul.upload_from_filename('npresult.txt')
# Download blob to read
blob = bucket.get_blob('aws_prop.txt')
if blob == None:
fin_res = 'Bucket found, blob not found'
else:
print("Getting blob...")
fin_res = blob.download_as_string()
else:
print("Bucket failed")
fin_res = 'Bucket not found'
msg2 = '--RESULT--\n{}\n{}\n-----\n'.format(zz, fin_res)
msg += msg2
current_app.logger.info(msg)
# You can return any http status code you like, simply place a comma after
# your return statement, and typing in the status code.
return msg, 200
| 25.655738 | 96 | 0.608946 |
677e5edf38508b7b5834b07534e8559097ebee28 | 495 | py | Python | accounts/utils.py | dieisonborges/sicario | 3282c7bc89e7e6ae087ab4b67a9387dca9c71a4f | [
"MIT"
] | 1 | 2019-04-16T12:42:36.000Z | 2019-04-16T12:42:36.000Z | accounts/utils.py | dieisonborges/sicario | 3282c7bc89e7e6ae087ab4b67a9387dca9c71a4f | [
"MIT"
] | 11 | 2019-02-28T02:39:56.000Z | 2019-02-28T19:19:15.000Z | accounts/utils.py | dieisonborges/sicario | 3282c7bc89e7e6ae087ab4b67a9387dca9c71a4f | [
"MIT"
] | 1 | 2019-04-28T13:36:44.000Z | 2019-04-28T13:36:44.000Z | import string, random, os
from datetime import datetime
def rand_gen(size=100):
current_time = datetime.now()
current_time = current_time.strftime("%Y%m%d%H%M%S")
chars = string.ascii_uppercase + string.digits
return current_time+("".join(random.choice(chars) for x in range(size)))
def path_and_rename(instance, filename):
upload_to = 'accounts/'
ext = filename.split('.')[-1]
filename = '{}.{}'.format(rand_gen(), ext)
return os.path.join(upload_to, filename) | 35.357143 | 76 | 0.692929 |
fcf11606d18d51e49c896007466ef03111633b44 | 1,134 | py | Python | plugins/aws/conftest.py | agilityroots/ramukaka | 4ade291f9b17795b9d04b47406818ad65918cbf6 | [
"MIT"
] | null | null | null | plugins/aws/conftest.py | agilityroots/ramukaka | 4ade291f9b17795b9d04b47406818ad65918cbf6 | [
"MIT"
] | 12 | 2018-01-16T04:05:10.000Z | 2018-02-01T05:20:11.000Z | plugins/aws/conftest.py | agilityroots/ramukaka | 4ade291f9b17795b9d04b47406818ad65918cbf6 | [
"MIT"
] | null | null | null | # References:
# https://docs.pytest.org/en/latest/fixture.html#fixture-function
import pytest
import os
CONFIG = {
'access_id': os.environ['ERRBOT_AWS_ACCESS_KEY'],
'secret_key': os.environ['ERRBOT_AWS_SECRET_KEY'],
'account_id': os.environ['ERRBOT_AWS_ACCOUNT_ID'],
'region': os.environ['ERRBOT_AWS_DEFAULT_REGION'],
'keypair': os.environ['ERRBOT_AWS_KEYPAIR_NAME'],
}
@pytest.fixture
def constants():
return {
"TEST_AMI_ID": 'ami-c24ef5bb',
"TEST_NODE_NAME": 'test_node',
"TEST_NODE_SIZE": 't2.micro'
}
@pytest.fixture
def config():
return CONFIG
@pytest.fixture(scope='class')
def driver():
"""
Get an initialized libcloud driver.
"""
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
xy = get_driver(Provider.EC2)
libcloud_driver = xy(CONFIG['access_id'], CONFIG['secret_key'], region=CONFIG['region'])
return libcloud_driver
@pytest.fixture
def awsadapter():
"""
Return an initialized AwsAdapter object.
"""
from awsadapter import AwsAdapter
return AwsAdapter(CONFIG)
| 23.625 | 92 | 0.691358 |
0a6bf2b8fd24ab830ecee360629be23d7cdf2855 | 1,063 | py | Python | src/api/views/operation/DataOperationJobDetailPageResource.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/api/views/operation/DataOperationJobDetailPageResource.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/api/views/operation/DataOperationJobDetailPageResource.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | import json
from flask import make_response, request
from injector import inject
from IocManager import IocManager
from domain.operation.page.DataOperationJobDetailPage import DataOperationJobDetailPage
from domain.operation.page.DataOperationJobPage import DataOperationJobPage
from infrastructor.api.ResourceBase import ResourceBase
from infrastructor.json.JsonConvert import JsonConvert
from views.operation.PageModels import PageModels
@PageModels.ns.route('/Job/<int:data_operation_job_id>',doc=False)
class DataOperationJobDetailPageResource(ResourceBase):
@inject
def __init__(self, page: DataOperationJobDetailPage,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.page = page
@IocManager.api.representation('text/html')
def get(self, data_operation_job_id):
data = PageModels.parser.parse_args(request)
pagination = JsonConvert.FromJSON(json.dumps(data))
page = self.page.render(id=data_operation_job_id, pagination=pagination)
return make_response(page, 200)
| 37.964286 | 87 | 0.774224 |
6fca98e7432d12b53e72f1304dca83172998a535 | 156 | py | Python | text.py | sumathi-kanthakumar/PythonITMFCS | 720874976907b79a688377d794c0f14dcf77979c | [
"MIT"
] | null | null | null | text.py | sumathi-kanthakumar/PythonITMFCS | 720874976907b79a688377d794c0f14dcf77979c | [
"MIT"
] | null | null | null | text.py | sumathi-kanthakumar/PythonITMFCS | 720874976907b79a688377d794c0f14dcf77979c | [
"MIT"
] | null | null | null |
#Comment below two lines to run in jupyter
from colorama import init
init()
from colorama import Fore
print(Fore.BLUE + "Blue")
print(Fore.GREEN+ "Green")
| 19.5 | 42 | 0.75641 |
a7e7ef6c07da287ab23d2fe7c7b86e845c2d219b | 1,238 | py | Python | data/merge_claim_tweets_real.py | TIMAN-group/covid19_misinformation | 901d528dd4a98a113cba3dce3050f50b21e00bcb | [
"MIT"
] | 7 | 2020-11-06T03:33:31.000Z | 2022-03-19T05:49:17.000Z | data/merge_claim_tweets_real.py | TIMAN-group/covid19_misinformation | 901d528dd4a98a113cba3dce3050f50b21e00bcb | [
"MIT"
] | null | null | null | data/merge_claim_tweets_real.py | TIMAN-group/covid19_misinformation | 901d528dd4a98a113cba3dce3050f50b21e00bcb | [
"MIT"
] | 1 | 2020-12-03T03:02:24.000Z | 2020-12-03T03:02:24.000Z | #!/usr/bin/env python
import os
import pandas as pd
COAID_PATH = os.path.join(os.path.dirname(__file__), "..", "CoAID")
REAL_TWEET_PATH = os.getcwd()
df_claim_real_tweet = pd.read_csv(os.path.join(COAID_PATH, "ClaimRealCOVID-19_tweets.csv"))
df_claim_real_tweet["id"] = df_claim_real_tweet["tweet_id"]
df_claim_real_tweet = df_claim_real_tweet.drop(["tweet_id"], axis=1)
df_real_tweets = pd.read_csv(os.path.join(REAL_TWEET_PATH, "coaid_tweets_real.csv"))
df_real_tweet_claims = pd.merge(df_claim_real_tweet, df_real_tweets, on="id", how="left")
df_real_tweet_claims.to_csv(os.path.join(COAID_PATH, "ClaimRealCOVID-19_tweets_expanded.csv"), index=False)
df_claim_real_tweet_replies = pd.read_csv(os.path.join(COAID_PATH, "ClaimRealCOVID-19_tweets_replies.csv"))
df_claim_real_tweet_replies = df_claim_real_tweet_replies.drop(["tweet_id"], axis=1)
df_real_replies_claims = pd.merge(df_claim_real_tweet_replies, df_real_tweets,
left_on="reply_id",
right_on="id", how="left")
df_real_replies_claims = df_real_replies_claims.drop(["reply_id"], axis=1)
df_real_replies_claims.to_csv(os.path.join(COAID_PATH, "ClaimRealCOVID-19_replies_expanded.csv"), index=False)
| 41.266667 | 110 | 0.756058 |
1dd0ff26c40f2f14dd5085bf43833229ab971d3f | 205 | py | Python | build/lib.linux-x86_64-2.7/tornado_swirl/__init__.py | arthur-barbosa18/ecs-lib-tornado-swirl | 8d38d0c06242c70f58178f673f55033dfd066a78 | [
"MIT"
] | null | null | null | build/lib.linux-x86_64-2.7/tornado_swirl/__init__.py | arthur-barbosa18/ecs-lib-tornado-swirl | 8d38d0c06242c70f58178f673f55033dfd066a78 | [
"MIT"
] | null | null | null | build/lib.linux-x86_64-2.7/tornado_swirl/__init__.py | arthur-barbosa18/ecs-lib-tornado-swirl | 8d38d0c06242c70f58178f673f55033dfd066a78 | [
"MIT"
] | 1 | 2019-12-30T14:28:39.000Z | 2019-12-30T14:28:39.000Z | # -*- coding: utf-8 -*-
__version__ = '0.0.1'
__author__ = 'serena'
from tornado_swirl.settings import api_routes
from tornado_swirl.swagger import Application, describe, restapi, schema, add_global_tag
| 25.625 | 88 | 0.765854 |
c0bca476d5fdd9d9bfadda6489f021c899b6a520 | 695 | py | Python | qa/rpc-tests/create_cache.py | kazucoin/kazucoin-core | 1af2c5f9fa6240cffd68e1ab627339fe107a484a | [
"MIT"
] | 1 | 2019-06-02T17:20:49.000Z | 2019-06-02T17:20:49.000Z | qa/rpc-tests/create_cache.py | kazucoin/kazucoin-core | 1af2c5f9fa6240cffd68e1ab627339fe107a484a | [
"MIT"
] | null | null | null | qa/rpc-tests/create_cache.py | kazucoin/kazucoin-core | 1af2c5f9fa6240cffd68e1ab627339fe107a484a | [
"MIT"
] | 1 | 2018-12-23T23:33:18.000Z | 2018-12-23T23:33:18.000Z | #!/usr/bin/env python3
# Copyright (c) 2016 The Kazucoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helper script to create the cache
# (see KazucoinTestFramework.setup_chain)
#
from test_framework.test_framework import KazucoinTestFramework
class CreateCache(KazucoinTestFramework):
def __init__(self):
super().__init__()
# Test network and test nodes are not required:
self.num_nodes = 0
self.nodes = []
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
| 23.166667 | 69 | 0.700719 |
18d673531348102217e66138b6c87ecbf0f49f12 | 7,529 | py | Python | setup.py | liberix/taichi | bbd4ba13e021b32dbf9a52507f637aff0851fe39 | [
"MIT"
] | 1 | 2021-08-08T14:05:33.000Z | 2021-08-08T14:05:33.000Z | setup.py | liberix/taichi | bbd4ba13e021b32dbf9a52507f637aff0851fe39 | [
"MIT"
] | null | null | null | setup.py | liberix/taichi | bbd4ba13e021b32dbf9a52507f637aff0851fe39 | [
"MIT"
] | null | null | null | import glob
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Topic :: Software Development :: Compilers',
'Topic :: Multimedia :: Graphics',
'Topic :: Games/Entertainment :: Simulation',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
project_name = os.getenv('PROJECT_NAME', 'taichi')
TI_VERSION_MAJOR = 0
TI_VERSION_MINOR = 7
TI_VERSION_PATCH = 28
version = f'{TI_VERSION_MAJOR}.{TI_VERSION_MINOR}.{TI_VERSION_PATCH}'
data_files = glob.glob('python/lib/*')
print(data_files)
packages = find_packages('python')
print(packages)
# Our python package root dir is python/
package_dir = 'python'
def get_python_executable():
return sys.executable.replace('\\', '/')
def get_os_name():
name = platform.platform()
# in python 3.8, platform.platform() uses mac_ver() on macOS
# it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
if name.lower().startswith('darwin') or name.lower().startswith('macos'):
return 'osx'
elif name.lower().startswith('windows'):
return 'win'
elif name.lower().startswith('linux'):
return 'linux'
assert False, "Unknown platform name %s" % name
def remove_tmp(taichi_dir):
shutil.rmtree(os.path.join(taichi_dir, 'assets'), ignore_errors=True)
shutil.rmtree(os.path.join(taichi_dir, 'examples'), ignore_errors=True)
shutil.rmtree(os.path.join(taichi_dir, 'tests'), ignore_errors=True)
class CMakeExtension(Extension):
def __init__(self, name):
Extension.__init__(self, name, sources=[])
class EggInfo(egg_info):
def run(self):
taichi_dir = os.path.join(package_dir, 'taichi')
remove_tmp(taichi_dir)
shutil.copytree('tests/python', os.path.join(taichi_dir, 'tests'))
shutil.copytree('examples', os.path.join(taichi_dir, 'examples'))
shutil.copytree('external/assets', os.path.join(taichi_dir, 'assets'))
egg_info.run(self)
# python setup.py build runs the following commands in order:
# python setup.py build_py
# python setup.py build_ext
class BuildPy(build_py):
def run(self):
build_py.run(self)
taichi_dir = os.path.join(package_dir, 'taichi')
remove_tmp(taichi_dir)
class CMakeBuild(build_ext):
def parse_cmake_args_from_env(self):
# Source: TAICHI_CMAKE_ARGS=... python setup.py ...
import shlex
cmake_args = os.getenv('TAICHI_CMAKE_ARGS', '')
return shlex.split(cmake_args.strip())
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
# CMakeLists.txt is in the same directory as this setup.py file
cmake_list_dir = os.path.abspath(os.path.dirname(__file__))
self.build_temp = os.path.join(cmake_list_dir, 'build')
build_directory = os.path.abspath(self.build_temp)
cmake_args = self.parse_cmake_args_from_env()
cmake_args += [
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={build_directory}',
f'-DPYTHON_EXECUTABLE={get_python_executable()}',
f'-DTI_VERSION_MAJOR={TI_VERSION_MAJOR}',
f'-DTI_VERSION_MINOR={TI_VERSION_MINOR}',
f'-DTI_VERSION_PATCH={TI_VERSION_PATCH}',
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
# Assuming Makefiles
if get_os_name() != 'win':
num_threads = os.getenv('BUILD_NUM_THREADS',
multiprocessing.cpu_count())
build_args += ['--', f'-j{num_threads}']
self.build_args = build_args
env = os.environ.copy()
os.makedirs(self.build_temp, exist_ok=True)
print('-' * 10, 'Running CMake prepare', '-' * 40)
subprocess.check_call(['cmake', cmake_list_dir] + cmake_args,
cwd=self.build_temp,
env=env)
print('-' * 10, 'Building extensions', '-' * 40)
cmake_cmd = ['cmake', '--build', '.'] + self.build_args
subprocess.check_call(cmake_cmd, cwd=self.build_temp)
self.prepare_package()
def prepare_package(self):
# We need to make sure these additional files are ready for
# - develop mode: must exist in local python/taichi/lib/ folder
# - install mode: must exist in self.build_lib/taichi/lib
taichi_lib_dir = 'taichi/lib'
for target in (
os.path.join(package_dir, taichi_lib_dir),
os.path.join(self.build_lib, taichi_lib_dir),
):
shutil.rmtree(target, ignore_errors=True)
os.makedirs(target)
if get_os_name() == 'linux':
shutil.copy(os.path.join(self.build_temp, 'libtaichi_core.so'),
os.path.join(target, 'taichi_core.so'))
elif get_os_name() == 'osx':
shutil.copy(
os.path.join(self.build_temp, 'libtaichi_core.dylib'),
os.path.join(target, 'taichi_core.so'))
else:
shutil.copy('runtimes/Release/taichi_core.dll',
os.path.join(target, 'taichi_core.pyd'))
if get_os_name() != 'osx':
libdevice_path = 'external/cuda_libdevice/slim_libdevice.10.bc'
print("copying libdevice:", libdevice_path)
assert os.path.exists(libdevice_path)
shutil.copy(libdevice_path,
os.path.join(target, 'slim_libdevice.10.bc'))
llvm_runtime_dir = 'taichi/runtime/llvm'
for f in os.listdir(llvm_runtime_dir):
if f.startswith('runtime_') and f.endswith('.bc'):
print(f"Fetching runtime file {f} to {target} folder")
shutil.copy(os.path.join(llvm_runtime_dir, f), target)
setup(name=project_name,
packages=packages,
package_dir={"": package_dir},
version=version,
description='The Taichi Programming Language',
author='Taichi developers',
author_email='yuanmhu@gmail.com',
url='https://github.com/taichi-dev/taichi',
install_requires=[
'numpy',
'pybind11>=2.5.0',
'sourceinspect>=0.0.4',
'colorama',
'astor',
],
data_files=[('lib', data_files)],
keywords=['graphics', 'simulation'],
license='MIT',
include_package_data=True,
entry_points={
'console_scripts': [
'ti=taichi.main:main',
],
},
classifiers=classifiers,
ext_modules=[CMakeExtension('taichi_core')],
cmdclass=dict(egg_info=EggInfo, build_py=BuildPy, build_ext=CMakeBuild),
has_ext_modules=lambda: True)
| 35.018605 | 79 | 0.618542 |
0e1ac07c956691c128ac8f4c2517be792c059f54 | 6,499 | py | Python | openstack_dashboard/dashboards/project/data_processing/clusters/tabs.py | Tehsmash/horizon | 8ffade099a3a437509dcdcf25d5b054e5c188b61 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/data_processing/clusters/tabs.py | Tehsmash/horizon | 8ffade099a3a437509dcdcf25d5b054e5c188b61 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/data_processing/clusters/tabs.py | Tehsmash/horizon | 8ffade099a3a437509dcdcf25d5b054e5c188b61 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from openstack_dashboard.dashboards.project. \
data_processing.utils import workflow_helpers as helpers
from openstack_dashboard.api import glance
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "cluster_details_tab"
template_name = "project/data_processing.clusters/_details.html"
def get_context_data(self, request):
cluster_id = self.tab_group.kwargs['cluster_id']
cluster_info = {}
try:
sahara = saharaclient.client(request)
cluster = sahara.clusters.get(cluster_id)
for info_key, info_val in cluster.info.items():
for key, val in info_val.items():
if str(val).startswith(('http://', 'https://')):
cluster.info[info_key][key] = build_link(val)
base_image = glance.image_get(request,
cluster.default_image_id)
if getattr(cluster, 'cluster_template_id', None):
cluster_template = helpers.safe_call(
sahara.cluster_templates.get,
cluster.cluster_template_id)
else:
cluster_template = None
if getattr(cluster, 'neutron_management_network', None):
net_id = cluster.neutron_management_network
network = neutron.network_get(request, net_id)
network.set_id_as_name_if_empty()
net_name = network.name
else:
net_name = None
cluster_info.update({"cluster": cluster,
"base_image": base_image,
"cluster_template": cluster_template,
"network": net_name})
except Exception as e:
LOG.error("Unable to fetch cluster details: %s" % str(e))
return cluster_info
def build_link(url):
return "<a href='" + url + "' target=\"_blank\">" + url + "</a>"
class NodeGroupsTab(tabs.Tab):
name = _("Node Groups")
slug = "cluster_nodegroups_tab"
template_name = (
"project/data_processing.clusters/_nodegroups_details.html")
def get_context_data(self, request):
cluster_id = self.tab_group.kwargs['cluster_id']
try:
sahara = saharaclient.client(request)
cluster = sahara.clusters.get(cluster_id)
for ng in cluster.node_groups:
if ng["flavor_id"]:
ng["flavor_name"] = (
nova.flavor_get(request, ng["flavor_id"]).name)
if ng["floating_ip_pool"]:
ng["floating_ip_pool_name"] = (
self._get_floating_ip_pool_name(
request, ng["floating_ip_pool"]))
if ng.get("node_group_template_id", None):
ng["node_group_template"] = helpers.safe_call(
sahara.node_group_templates.get,
ng["node_group_template_id"])
except Exception:
cluster = {}
exceptions.handle(request,
_("Unable to get node group details."))
return {"cluster": cluster}
def _get_floating_ip_pool_name(self, request, pool_id):
pools = [pool for pool in network.floating_ip_pools_list(
request) if pool.id == pool_id]
return pools[0].name if pools else pool_id
class Instance(object):
def __init__(self, name=None, id=None, internal_ip=None,
management_ip=None):
self.name = name
self.id = id
self.internal_ip = internal_ip
self.management_ip = management_ip
class InstancesTable(tables.DataTable):
name = tables.Column("name",
link=("horizon:project:instances:detail"),
verbose_name=_("Name"))
internal_ip = tables.Column("internal_ip",
verbose_name=_("Internal IP"))
management_ip = tables.Column("management_ip",
verbose_name=_("Management IP"))
class Meta:
name = "cluster_instances"
# Just ignoring the name.
verbose_name = _(" ")
class InstancesTab(tabs.TableTab):
name = _("Instances")
slug = "cluster_instances_tab"
template_name = "project/data_processing.clusters/_instances_details.html"
table_classes = (InstancesTable, )
def get_cluster_instances_data(self):
cluster_id = self.tab_group.kwargs['cluster_id']
try:
sahara = saharaclient.client(self.request)
cluster = sahara.clusters.get(cluster_id)
instances = []
for ng in cluster.node_groups:
for instance in ng["instances"]:
instances.append(Instance(
name=instance["instance_name"],
id=instance["instance_id"],
internal_ip=instance.get("internal_ip",
"Not assigned"),
management_ip=instance.get("management_ip",
"Not assigned")))
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to fetch instance details."))
return instances
class ClusterDetailsTabs(tabs.TabGroup):
slug = "cluster_details"
tabs = (GeneralTab, NodeGroupsTab, InstancesTab, )
sticky = True
| 35.513661 | 78 | 0.592707 |
0a8af9efc4c9ef5c7d5bdd359150fcd7b24774a2 | 401 | py | Python | SensorMonitor/wsgi.py | circuitar/SensorMonitor | 5d1bdaa448f1f52eac357782bb0eb7389e420c0f | [
"MIT"
] | 4 | 2015-03-24T20:15:06.000Z | 2021-04-27T16:53:37.000Z | SensorMonitor/wsgi.py | circuitar/SensorMonitor | 5d1bdaa448f1f52eac357782bb0eb7389e420c0f | [
"MIT"
] | null | null | null | SensorMonitor/wsgi.py | circuitar/SensorMonitor | 5d1bdaa448f1f52eac357782bb0eb7389e420c0f | [
"MIT"
] | 3 | 2017-01-15T08:32:26.000Z | 2020-12-16T12:58:40.000Z | """
WSGI config for SensorMonitor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SensorMonitor.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 26.733333 | 78 | 0.795511 |
a5f895dd84c149ec9b60942d0167405ad276fb0a | 11,921 | py | Python | deutschland/lebensmittelwarnung/model/request_options.py | kiranmusze/deutschland | 86d8ead3f38ad88ad66bb338b9f5a8db06992344 | [
"Apache-2.0"
] | null | null | null | deutschland/lebensmittelwarnung/model/request_options.py | kiranmusze/deutschland | 86d8ead3f38ad88ad66bb338b9f5a8db06992344 | [
"Apache-2.0"
] | null | null | null | deutschland/lebensmittelwarnung/model/request_options.py | kiranmusze/deutschland | 86d8ead3f38ad88ad66bb338b9f5a8db06992344 | [
"Apache-2.0"
] | null | null | null | """
Lebensmittelwarnungen API
Liste aller Lebensmittel und Produktwarnungen # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.lebensmittelwarnung.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from deutschland.lebensmittelwarnung.exceptions import ApiAttributeError
class RequestOptions(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            and the for var_name this is (var_name,). The value is a dict
            with a capitalized key describing the allowed value and an allowed
            value. These dicts store the allowed enum values.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
        discriminator_value_class_map (dict): A dict to go from the discriminator
            variable value to the discriminator class name.
        validations (dict): The key is the tuple path to the attribute
            and the for var_name this is (var_name,). The value is a dict
            that stores validations for max_length, min_length, max_items,
            min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
            inclusive_minimum, and regex.
        additional_properties_type (tuple): A tuple of classes accepted
            as additional properties values.
    """

    # This model declares no enum-restricted values and no range/length
    # validations, hence both dicts are empty.
    allowed_values = {}

    validations = {}

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (
            bool,
            date,
            datetime,
            dict,
            float,
            int,
            list,
            str,
            none_type,
        )  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "rows": (int,),  # noqa: E501
            "sort": (str,),  # noqa: E501
            "start": (int,),  # noqa: E501
            "fq": ([str],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None

    # Python attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        "rows": "rows",  # noqa: E501
        "sort": "sort",  # noqa: E501
        "start": "start",  # noqa: E501
        "fq": "fq",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """RequestOptions - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            rows (int): Anzahl zu ladender Einträge. [optional]  # noqa: E501
            sort (str): [optional]  # noqa: E501
            start (int): Start-Index der zu ladenden Einträge. [optional]  # noqa: E501
            fq ([str]): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

        # Bypass __init__ so read-only attributes can be populated here.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
        ]
    )

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """RequestOptions - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            rows (int): Anzahl zu ladender Einträge. [optional]  # noqa: E501
            sort (str): [optional]  # noqa: E501
            start (int): Start-Index der zu ladenden Einträge. [optional]  # noqa: E501
            fq ([str]): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, the public constructor rejects
            # attempts to set read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(
                    f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                    f"class with read only attributes."
                )
| 42.123675 | 106 | 0.557168 |
b4bafd6cca35dec480a200e152dba92e846ec20a | 990 | py | Python | app/app/urls.py | sibelkahraman/recipe-app-api | 2132ed21783ab1aaf57520a610505898abca14e5 | [
"MIT"
] | null | null | null | app/app/urls.py | sibelkahraman/recipe-app-api | 2132ed21783ab1aaf57520a610505898abca14e5 | [
"MIT"
] | null | null | null | app/app/urls.py | sibelkahraman/recipe-app-api | 2132ed21783ab1aaf57520a610505898abca14e5 | [
"MIT"
] | null | null | null | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Project URL table; uploaded media files are served via Django's static()
# helper (development-only behavior).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/user/', include('user.urls')),
    path('api/recipe/', include('recipe.urls')),
    # Fix: the setting is MEDIA_URL -- the original `settings.MEDIAL_URL`
    # typo raised AttributeError when the URLconf was imported.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.666667 | 77 | 0.716162 |
27323c0d4945cfd136c2c38c20faf2eec33874f1 | 571 | py | Python | botfriend/tests/__init__.py | 0xdc/botfriend | 6157a873c4158ccfdda4bf021059bddf14217654 | [
"MIT"
] | 39 | 2017-06-19T16:12:34.000Z | 2022-03-02T10:06:29.000Z | botfriend/tests/__init__.py | 0xdc/botfriend | 6157a873c4158ccfdda4bf021059bddf14217654 | [
"MIT"
] | null | null | null | botfriend/tests/__init__.py | 0xdc/botfriend | 6157a873c4158ccfdda4bf021059bddf14217654 | [
"MIT"
] | 5 | 2018-08-27T19:49:56.000Z | 2020-10-22T02:31:04.000Z | import sys
import os
from nose.tools import set_trace
# Add the parent directory to the path so that import statements will work
# the same in tests as in code.
this_dir = os.path.abspath(os.path.dirname(__file__))
parent = os.path.split(this_dir)[0]
sys.path.insert(0, parent)
# Having problems with the database not being initialized? This module is
# being imported twice through two different paths. Uncomment this
# set_trace() and see where the second one is happening.
#
# set_trace()
from testing import (
DatabaseTest,
package_setup
)
package_setup()
| 25.954545 | 74 | 0.765324 |
819e5549d05ae0c6274b1a2d09ae27f3fc46e7d6 | 1,324 | py | Python | pandas/tests/extension/arrow/test_timestamp.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-05-07T04:58:36.000Z | 2021-05-07T04:58:59.000Z | pandas/tests/extension/arrow/test_timestamp.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/extension/arrow/test_timestamp.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-06-16T07:19:12.000Z | 2021-12-16T10:24:44.000Z | from __future__ import annotations
import datetime
from typing import Type
import pytest
import pandas as pd
from pandas.api.extensions import (
ExtensionDtype,
register_extension_dtype,
)
pytest.importorskip("pyarrow", minversion="0.13.0")
import pyarrow as pa # isort:skip
from pandas.tests.extension.arrow.arrays import ArrowExtensionArray # isort:skip
@register_extension_dtype
class ArrowTimestampUSDtype(ExtensionDtype):
    # Minimal pandas ExtensionDtype whose scalars are datetime.datetime and
    # whose storage is pyarrow microsecond-timestamp data.

    type = datetime.datetime
    kind = "M"  # numpy-style kind code used for datetime-like dtypes
    name = "arrow_timestamp_us"
    na_value = pa.NULL

    @classmethod
    def construct_array_type(cls) -> Type[ArrowTimestampUSArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return ArrowTimestampUSArray
class ArrowTimestampUSArray(ArrowExtensionArray):
    """Extension array backed by a pyarrow ChunkedArray of timestamp("us")."""

    def __init__(self, values):
        # Fix: the original raised a bare ValueError with no message, which
        # gave the caller no hint about what was wrong with the input.
        if not isinstance(values, pa.ChunkedArray):
            raise ValueError(
                "values must be a pyarrow.ChunkedArray, got "
                + type(values).__name__
            )

        assert values.type == pa.timestamp("us")
        self._data = values
        self._dtype = ArrowTimestampUSDtype()
def test_constructor_extensionblock():
    # GH 34986: building a DataFrame from a third-party ExtensionArray that
    # contains a missing entry (None) -- the construction itself is the test;
    # it must complete without raising.
    pd.DataFrame(
        {
            "timestamp": ArrowTimestampUSArray.from_scalars(
                [None, datetime.datetime(2010, 9, 8, 7, 6, 5, 4)]
            )
        }
    )
3377d1b446b2897e756bbfd986538cb7632e88b8 | 995 | py | Python | b_tree/binary_search_tree/bst_nodes_in_a_range.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | b_tree/binary_search_tree/bst_nodes_in_a_range.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | 5 | 2021-03-10T11:49:39.000Z | 2022-02-27T01:35:59.000Z | b_tree/binary_search_tree/bst_nodes_in_a_range.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | # Definition for a binary tree node
# NOT_SOLVED
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    """Count the nodes of a BST whose values lie between two endpoints."""

    def __init__(self):
        # Kept for backward compatibility; solve() no longer uses it.
        self.count = 0

    def solve(self, A, B, C):
        """Return the number of nodes in BST ``A`` whose value lies in
        the closed range [min(B, C), max(B, C)].

        Fix: the original implementation located the in-order indices of B
        and C and returned ``abs(index_c - index_b) + 1``; when either
        endpoint was absent from the tree its index stayed -1 and the result
        was wrong.  Counting the traversal values inside the range handles
        absent endpoints and reversed endpoints uniformly.
        """
        lo, hi = (B, C) if B <= C else (C, B)
        return sum(1 for value in self.in_order_traversal(A) if lo <= value <= hi)

    def in_order_traversal(self, root):
        """Iterative in-order traversal; for a BST the values come back sorted."""
        stack = []
        answer = []
        node = root
        while True:
            if node:
                stack.append(node)
                node = node.left
            elif stack:
                node = stack.pop()
                answer.append(node.val)
                node = node.right
            else:
                break
        return answer
| 22.111111 | 45 | 0.467337 |
f0b5a6489e0dee063254c6cfaac2017a25107489 | 9,831 | py | Python | cupy/testing/_helper.py | Onkar627/cupy | 8eef1ad5393c0a92c5065bc05137bf997f37044a | [
"MIT"
] | null | null | null | cupy/testing/_helper.py | Onkar627/cupy | 8eef1ad5393c0a92c5065bc05137bf997f37044a | [
"MIT"
] | null | null | null | cupy/testing/_helper.py | Onkar627/cupy | 8eef1ad5393c0a92c5065bc05137bf997f37044a | [
"MIT"
] | 1 | 2022-03-21T20:19:12.000Z | 2022-03-21T20:19:12.000Z | import contextlib
import inspect
from typing import Callable
import unittest
from unittest import mock
import warnings
import numpy
import cupy
from cupy._core import internal
import cupyx
import cupyx.scipy.sparse
from cupy.testing._pytest_impl import is_available
# Prefer pytest's skipif marker when pytest is importable; otherwise fall
# back to unittest's, so the decorators below work under either runner.
if is_available():
    import pytest

    _skipif: Callable[..., Callable[[Callable], Callable]] = pytest.mark.skipif
else:
    _skipif = unittest.skipIf
def with_requires(*requirements):
    """Skip a test case unless all of the given requirements are installed.

    .. admonition:: Example

        This test case runs only when `numpy>=1.18` is installed.

        >>> from cupy import testing
        ... class Test(unittest.TestCase):
        ...     @testing.with_requires('numpy>=1.18')
        ...     def test_for_numpy_1_18(self):
        ...         pass

    Args:
        requirements: A list of string representing requirement condition to
            run a given test case.
    """
    reason = 'requires: {}'.format(','.join(requirements))
    satisfied = installed(requirements)
    return _skipif(not satisfied, reason=reason)
def installed(*specifiers):
    """Return ``True`` when every given package requirement is satisfied.

    Args:
        specifiers: Version specifiers (e.g., `numpy>=1.20.0`).
    """
    # Delay import of pkg_resources because it is excruciatingly slow.
    # See https://github.com/pypa/setuptools/issues/510
    import pkg_resources

    def _satisfied(spec):
        try:
            pkg_resources.require(spec)
        except pkg_resources.ResolutionError:
            return False
        return True

    return all(_satisfied(spec) for spec in specifiers)
def numpy_satisfies(version_range):
    """Return ``True`` if the installed numpy matches *version_range*.

    Args:
        version_range: A version specifier (e.g., `>=1.13.0`).
    """
    spec = 'numpy' + version_range
    return installed(spec)
def shaped_arange(shape, xp=cupy, dtype=numpy.float32, order='C'):
    """Build an array of the given shape filled with :math:`1, \\cdots, N`.

    Args:
        shape(tuple of int): Shape of returned ndarray.
        xp(numpy or cupy): Array module to use.
        dtype(dtype): Dtype of returned ndarray.
        order({'C', 'F'}): Order of returned ndarray.

    Returns:
        numpy.ndarray or cupy.ndarray: An array whose flattened contents are
        the sequence ``1 .. N`` (``N`` = number of elements) cast to *dtype*.
        For ``numpy.bool_`` dtype, even entries become ``True`` and odd
        entries become ``False``.
    """
    dtype = numpy.dtype(dtype)
    count = internal.prod(shape)
    seq = numpy.arange(1, count + 1, 1)
    if dtype == '?':
        seq = seq % 2 == 0
    elif dtype.kind == 'c':
        seq = seq + seq * 1j
    return xp.array(seq.astype(dtype).reshape(shape), order=order)
def shaped_reverse_arange(shape, xp=cupy, dtype=numpy.float32):
    """Build an array of the given shape filled with :math:`N, \\cdots, 1`.

    Args:
        shape(tuple of int): Shape of returned ndarray.
        xp(numpy or cupy): Array module to use.
        dtype(dtype): Dtype of returned ndarray.

    Returns:
        numpy.ndarray or cupy.ndarray: An array whose flattened contents are
        the decreasing sequence ``N .. 1`` cast to *dtype*.  For
        ``numpy.bool_`` dtype, even entries become ``True`` and odd entries
        become ``False``.
    """
    dtype = numpy.dtype(dtype)
    count = internal.prod(shape)
    seq = numpy.arange(count, 0, -1)
    if dtype == '?':
        seq = seq % 2 == 0
    elif dtype.kind == 'c':
        seq = seq + seq * 1j
    return xp.array(seq.astype(dtype).reshape(shape))
def shaped_random(
        shape, xp=cupy, dtype=numpy.float32, scale=10, seed=0, order='C'):
    """Build an array of the given shape filled with seeded random values.

    Args:
        shape(tuple): Shape of returned ndarray.
        xp(numpy or cupy): Array module to use.
        dtype(dtype): Dtype of returned ndarray.
        scale(float): Scaling factor of elements.
        seed(int): Random seed.

    Returns:
        numpy.ndarray or cupy.ndarray: For ``numpy.bool_`` dtype, entries are
        drawn uniformly from ``{True, False}``.  Otherwise entries are drawn
        i.i.d. from the uniform distribution over ``[0, scale)`` (real and
        imaginary parts independently for complex dtypes).
    """
    # Seed first so results are reproducible for a given (shape, dtype, seed).
    numpy.random.seed(seed)
    dtype = numpy.dtype(dtype)
    if dtype == '?':
        sample = numpy.random.randint(2, size=shape)
    elif dtype.kind == 'c':
        # Draw real part before imaginary part to keep the RNG stream
        # identical to the historical implementation.
        real = numpy.random.rand(*shape)
        imag = numpy.random.rand(*shape)
        sample = (real + 1j * imag) * scale
    else:
        sample = numpy.random.rand(*shape) * scale
    return xp.asarray(sample, dtype=dtype, order=order)
def shaped_sparse_random(
        shape, sp=cupyx.scipy.sparse, dtype=numpy.float32,
        density=0.01, format='coo', seed=0):
    """Build a random sparse matrix with the given shape and density.

    Args:
        shape (tuple): Shape of returned sparse matrix.
        sp (scipy.sparse or cupyx.scipy.sparse): Sparse matrix module to use.
        dtype (dtype): Dtype of returned sparse matrix.
        density (float): Density of returned sparse matrix.
        format (str): Format of returned sparse matrix.
        seed (int): Random seed.

    Returns:
        A sparse matrix in the requested module and format.
    """
    import scipy.sparse

    n_rows, n_cols = shape
    numpy.random.seed(seed)
    mat = scipy.sparse.random(n_rows, n_cols, density).astype(dtype)
    if sp is cupyx.scipy.sparse:
        # Move the host matrix to the GPU representation.
        mat = cupyx.scipy.sparse.coo_matrix(mat)
    elif sp is not scipy.sparse:
        raise ValueError('Unknown module: {}'.format(sp))
    return mat.asformat(format)
def generate_matrix(
        shape, xp=cupy, dtype=numpy.float32, *, singular_values=None):
    r"""Returns a matrix with specified singular values.

    Generates a random matrix with given singular values.

    This function generates a random NumPy matrix (or a stack of matrices) that
    has specified singular values. It can be used to generate the inputs for a
    test that can be instable when the input value behaves bad.

    Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
    and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.

    Args:
        shape (tuple of int): Shape of the generated array, i.e.,
            :math:`(B..., M, N)`.
        xp (numpy or cupy): Array module to use.
        dtype: Dtype of the generated array.
        singular_values (array-like): Singular values of the generated
            matrices. It must be broadcastable to shape :math:`(B..., K)`.

    Returns:
        numpy.ndarray or cupy.ndarray: A random matrix that has specifiec
        singular values.
    """

    # Validate the requested shape and singular values before any RNG work.
    if len(shape) <= 1:
        raise ValueError(
            'shape {} is invalid for matrices: too few axes'.format(shape)
        )

    if singular_values is None:
        raise TypeError('singular_values is not given')
    singular_values = xp.asarray(singular_values)

    dtype = numpy.dtype(dtype)
    if dtype.kind not in 'fc':
        raise TypeError('dtype {} is not supported'.format(dtype))

    if not xp.isrealobj(singular_values):
        raise TypeError('singular_values is not real')
    if (singular_values < 0).any():
        raise ValueError('negative singular value is given')

    # Generate random matrices with given singular values. We simply generate
    # orthogonal vectors using SVD on random matrices and then combine them
    # with the given singular values.
    a = xp.random.randn(*shape)
    if dtype.kind == 'c':
        a = a + 1j * xp.random.randn(*shape)
    u, s, vh = xp.linalg.svd(a, full_matrices=False)
    # Replace the random singular values (s) with the requested ones.
    sv = xp.broadcast_to(singular_values, s.shape)
    a = xp.einsum('...ik,...k,...kj->...ij', u, sv, vh)
    return a.astype(dtype)
@contextlib.contextmanager
def assert_warns(expected):
    """Context manager asserting that the body raises a matching warning.

    Args:
        expected: Warning class (or tuple of classes, as accepted by
            ``isinstance``) that must be emitted inside the ``with`` block.

    Raises:
        AssertionError: If no warning of the expected type was emitted.
    """
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        yield

    if any(isinstance(m.message, expected) for m in w):
        return

    try:
        exc_name = expected.__name__
    except AttributeError:
        # `expected` may be a tuple of classes, which has no __name__.
        exc_name = str(expected)

    # Fix: the original message misspelled "triggered" as "triggerred".
    raise AssertionError('%s not triggered' % exc_name)
class NumpyAliasTestBase(unittest.TestCase):
    # Base class for tests that compare a cupy function with its numpy alias.

    @property
    def func(self):
        # Subclasses must override with the function name (str) to look up
        # on both modules.
        raise NotImplementedError()

    @property
    def cupy_func(self):
        return getattr(cupy, self.func)

    @property
    def numpy_func(self):
        return getattr(numpy, self.func)
class NumpyAliasBasicTestBase(NumpyAliasTestBase):
    # Checks that the cupy alias mirrors numpy's signature and carries its
    # own (non-shared, non-empty) docstring.

    def test_argspec(self):
        f = inspect.signature
        assert f(self.cupy_func) == f(self.numpy_func)

    def test_docstring(self):
        cupy_func = self.cupy_func
        numpy_func = self.numpy_func
        assert hasattr(cupy_func, '__doc__')
        assert cupy_func.__doc__ is not None
        assert cupy_func.__doc__ != ''
        # Identity check: the docstring must not be the very same object as
        # numpy's, i.e. cupy must document the alias itself.
        assert cupy_func.__doc__ is not numpy_func.__doc__
class NumpyAliasValuesTestBase(NumpyAliasTestBase):
    # Checks that cupy and numpy aliases agree on the result for
    # self.args (supplied by subclasses).

    def test_values(self):
        assert self.cupy_func(*self.args) == self.numpy_func(*self.args)
@contextlib.contextmanager
def assert_function_is_called(*args, times_called=1, **kwargs):
    """Patch a function for the duration of the block and check its call count.

    Args:
        *args: Positional arguments forwarded to `mock.patch`.
        times_called (int): The number of times the function should be
            called. Default is ``1``.
        **kwargs: Keyword arguments forwarded to `mock.patch`.
    """
    patcher = mock.patch(*args, **kwargs)
    handle = patcher.start()
    try:
        yield
    finally:
        patcher.stop()
    assert handle.call_count == times_called


# TODO(kataoka): remove this alias
AssertFunctionIsCalled = assert_function_is_called
| 31.012618 | 79 | 0.644594 |
bee54213fdb90c22607fdc8141da14d570deb89e | 3,702 | py | Python | BackEnd/api/service/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | 1 | 2021-05-02T14:24:38.000Z | 2021-05-02T14:24:38.000Z | BackEnd/api/service/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | 1 | 2022-02-10T00:48:47.000Z | 2022-02-10T00:48:47.000Z | BackEnd/api/service/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | null | null | null | from flask import abort, jsonify, make_response
from . import user as user_service
from api.database import db
from api.database.model import User, Document, Evaluation
from . import user as userService
from .event import badge_possession_verification
def build_evaluation_schema(evaluation):
    """Serialize an Evaluation row into a plain dict for JSON responses."""
    return {
        'user_id': evaluation.user_id,
        'graph_ref': evaluation.graph_ref,
        'comprehension_rating': evaluation.comprehension_rating,
        'quality_rating': evaluation.quality_rating,
    }
def get_evaluation(user_id, graph_ref):
    """Fetch one evaluation by (user_id, graph_ref), aborting 409 if absent."""
    evaluation = Evaluation.query.filter_by(
        user_id=user_id, graph_ref=graph_ref).first()
    if not evaluation:
        payload = {
            "errors": {0: "Evaluation not found"},
            "message": "Evaluation not found",
        }
        abort(make_response(jsonify(payload), 409))
    return build_evaluation_schema(evaluation)
def add_evaluation(data):
    # Create (or reuse) the evaluation for (user_id, graph_ref), mark the
    # document validated for the user, grant experience, and check badges.
    # `data` is expected to carry at least user_id, graph_ref,
    # comprehension_rating and quality_rating -- TODO confirm against callers.
    user = User.query.get(data.get('user_id'))
    if not user:
        abort(make_response(jsonify({
            "errors": {
                0: "User not found"
            },
            "message": "User not found"
        }), 409))
    document = Document.query.filter_by(graph_ref=data.get('graph_ref')).first()
    if not document:
        abort(make_response(jsonify({
            "errors": {
                0: "Document not found"
            },
            "message": "Document not found"
        }), 409))
    # A user may only evaluate a document they opened and have not yet validated.
    if not document in user.get_opened_documents():
        abort(make_response(jsonify({
            "errors": {
                0: "Document not opened by this user"
            },
            "message": "Document not opened"
        }), 409))
    if document in user.get_validated_documents():
        abort(make_response(jsonify({
            "errors": {
                0: "Document already validated by this user"
            },
            "message": "Document already validated"
        }), 409))
    evaluation = Evaluation.query.filter_by(user_id=data.get('user_id'), graph_ref=data.get('graph_ref')).first()
    if not evaluation:
        evaluation = Evaluation(
            comprehension_rating=data.get('comprehension_rating'),
            quality_rating=data.get('quality_rating'),
            user_id=data.get('user_id'),
            graph_ref=data.get('graph_ref')
        )
        db.session.add(evaluation)
    # NOTE(review): the steps below appear to run even when an evaluation row
    # already existed (validation + 30 XP + badge check) -- confirm intended.
    user_service.add_validated_document(evaluation.user_id, evaluation.graph_ref)
    userService.add_user_experience(evaluation.user_id, 30)
    db.session.flush()
    db.session.commit()
    badge_possession_verification(evaluation.user_id, 'Knowledge architect', {})
    return build_evaluation_schema(evaluation)
def remove_evaluation(user_id, graph_ref):
    # Delete a user's evaluation of a document, detaching it from both the
    # user and the document before removing the row. Aborts with 409 when
    # the user, document, or evaluation does not exist.
    user = User.query.get(user_id)
    if not user:
        abort(make_response(jsonify({
            "errors": {
                0: "User not found"
            },
            "message": "User not found"
        }), 409))
    document = Document.query.filter_by(graph_ref=graph_ref).first()
    if not document:
        abort(make_response(jsonify({
            "errors": {
                0: "Document not found"
            },
            "message": "Document not found"
        }), 409))
    evaluation = Evaluation.query.filter_by(user_id=user_id, graph_ref=graph_ref).first()
    if not evaluation:
        abort(make_response(jsonify({
            "errors": {
                0: "Evaluation not found"
            },
            "message": "Evaluation not found"
        }), 409))
    # Detach the evaluation from both sides of the relationship, then delete.
    user.remove_document_evaluation(evaluation)
    document.remove_user_evaluation(evaluation)
    db.session.delete(evaluation)
    db.session.commit()
    return True
| 34.277778 | 113 | 0.613722 |
6f4da346853dc8ac999dfcd0c880fa996a31b382 | 320 | py | Python | home/migrations/0011_remove_team_position.py | nurpeiis/RED | 0e1f61488f79283749ec11b5d0e5b066dd02bd68 | [
"MIT"
] | null | null | null | home/migrations/0011_remove_team_position.py | nurpeiis/RED | 0e1f61488f79283749ec11b5d0e5b066dd02bd68 | [
"MIT"
] | 12 | 2019-02-03T07:54:32.000Z | 2022-03-11T23:33:19.000Z | home/migrations/0011_remove_team_position.py | nurpeiis/RED | 0e1f61488f79283749ec11b5d0e5b066dd02bd68 | [
"MIT"
] | 2 | 2018-12-28T11:38:17.000Z | 2019-09-11T22:45:04.000Z | # Generated by Django 2.1.3 on 2019-05-05 07:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0010_team_position'),
]
operations = [
migrations.RemoveField(
model_name='team',
name='position',
),
]
| 17.777778 | 47 | 0.58125 |
05f2732e918e12b915e85fdbf19a7fbe33517456 | 718 | py | Python | BOJ/graph_boj/dfs_reference.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/dfs_reference.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/dfs_reference.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # 음료수 얼려 먹기
"""
n * m 크기의 얼음 틀이 있다. 구멍이 뚫려 있는 부분은 0, 칸막이가 존재하는 부분은 1로 표시된다.
구멍이 뚫려 있는 부분끼리 상, 하, 좌, 우로 붙어있는 경우 서로 연결되어있는것으로 간주한다.
이때 얼음 틀의 모양이 주어졌을 때 생성되는 총 아이스크림의 개수를 구하는 프로그램을 작성하라
"""
dx = [-1, 0, 1, 0]
dy = [0, -1, 0, 1]
def dfs(x, y):
if x <= -1 or x >= n or y <= -1 or y >= m:
return False
if graph[x][y] == 0: # 나머지 1은 모두 False처리
graph[x][y] = 1
for i in range(4):
dfs(x + dx[i], y + dy[i])
return True
return False
n, m = map(int, input().split())
graph = []
for i in range(n):
graph.append(list(map(int, list(input()))))
print(graph)
res = 0
for i in range(n):
for j in range(m):
if dfs(i, j):
res += 1
print(res)
| 18.410256 | 59 | 0.512535 |
42e215ea64e85d9cb2c85199015ce4f156d4bdd0 | 5,429 | py | Python | getsub/downloader/subhd.py | nicolaszf/GetSubtitles | 34458cc1c853bab6fa238eff073162b83ba2c0e6 | [
"MIT"
] | null | null | null | getsub/downloader/subhd.py | nicolaszf/GetSubtitles | 34458cc1c853bab6fa238eff073162b83ba2c0e6 | [
"MIT"
] | null | null | null | getsub/downloader/subhd.py | nicolaszf/GetSubtitles | 34458cc1c853bab6fa238eff073162b83ba2c0e6 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import print_function
import time
import json
import re
from contextlib import closing
from collections import OrderedDict as order_dict
import requests
from bs4 import BeautifulSoup
from getsub.downloader.downloader import Downloader
from getsub.sys_global_var import py, prefix
from getsub.progress_bar import ProgressBar
''' SubHD 字幕下载器
'''
class SubHDDownloader(Downloader):
    # Subtitle downloader that scrapes search results from subhd.tv.
    name = 'subhd'
    choice_prefix = '[SUBHD]'
    site_url = 'https://subhd.tv'
    search_url = 'https://subhd.tv/search/'

    def get_subtitles(self, video_name, sub_num=5):
        # Search SubHD for *video_name*, progressively dropping the least
        # significant keyword until enough candidates are found. Returns an
        # OrderedDict mapping display name -> {'lan': language score,
        # 'link': detail-page url, 'session': None}.
        print(prefix + ' Searching SUBHD...', end='\r')
        keywords, info_dict = Downloader.get_keywords(video_name)
        keyword = ' '.join(keywords)
        sub_dict = order_dict()
        s = requests.session()
        while True:
            # Query the site with the current keyword.
            r = s.get(SubHDDownloader.search_url + keyword,
                      headers=Downloader.header, timeout=10)
            bs_obj = BeautifulSoup(r.text, 'html.parser')
            try:
                if py == 2:
                    small_text = bs_obj.find('small').text.encode('utf8')
                else:
                    small_text = bs_obj.find('small').text
            except AttributeError as e:  # noqa: F841
                char_error = 'The URI you submitted has disallowed characters'
                if char_error in bs_obj.text:
                    print(prefix + ' [SUBHD ERROR] '
                          + char_error + ': ' + keyword)
                    return sub_dict
                # Presumably a search-verification (captcha) page was served;
                # wait briefly and retry the same keyword.
                time.sleep(2)
                continue
            # The result-count banner reads "总共 0 条" ("0 results in total")
            # when nothing matched.
            if '总共 0 条' not in small_text:
                for one_box in bs_obj.find_all('div', {'class': 'box'}):
                    # For movies, skip result boxes that are not movie lists.
                    if info_dict['type'] == 'movie' \
                            and not one_box.find('div', {'class': 'movielist'}):
                        continue
                    a = one_box.find('div', {'class': 'd_title'}).find('a')
                    sub_url = SubHDDownloader.site_url + a.attrs['href']
                    sub_name = SubHDDownloader.choice_prefix + a.text.encode('utf8') if py == 2 \
                        else SubHDDownloader.choice_prefix + a.text
                    if py == 2:
                        text = one_box.text.encode('utf8')
                    else:
                        text = one_box.text
                    if '/ar' in a.attrs['href']:
                        # Score the language mix of the subtitle:
                        # English=1, Traditional=2, Simplified=4, Bilingual=8.
                        type_score = 0
                        type_score += ('英文' in text) * 1
                        type_score += ('繁体' in text) * 2
                        type_score += ('简体' in text) * 4
                        type_score += ('双语' in text) * 8
                        sub_dict[sub_name] = {
                            'lan': type_score,
                            'link': sub_url,
                            'session': None
                        }
                    if len(sub_dict) >= sub_num:
                        del keywords[:]  # reached the limit; clear keywords
                        break
            if len(keywords) > 1:  # not enough results: drop last keyword, retry
                keyword = keyword.replace(keywords[-1], '')
                keywords.pop(-1)
                continue
            break
        if (len(sub_dict.items()) > 0
                and list(sub_dict.items())[0][1]['lan'] < 8):
            # The first candidate is not bilingual: reorder by language score.
            sub_dict = order_dict(
                sorted(sub_dict.items(),
                       key=lambda e: e[1]['lan'], reverse=True)
            )
        return sub_dict

    def download_file(self, file_name, sub_url, session=None):
        # Resolve the real archive URL from the detail page and stream it.
        # Returns (datatype, raw bytes, error message).
        sid = sub_url.split('/')[-1]
        r = requests.get(sub_url, headers=Downloader.header)
        bs_obj = BeautifulSoup(r.text, 'html.parser')
        dtoken = bs_obj.find('button', {'id': 'down'})['dtoken']
        r = requests.post(SubHDDownloader.site_url + '/ajax/down_ajax',
                          data={'sub_id': sid, 'dtoken': dtoken},
                          headers=Downloader.header)
        content = r.content.decode('unicode-escape')
        if json.loads(content)['success'] is False:
            msg = 'download too frequently with subhd downloader,' + \
                  ' please change to other downloaders'
            return None, None, msg
        res = re.search('http:.*(?=")', r.content.decode('unicode-escape'))
        download_link = res.group(0).replace('\\/', '/')
        try:
            with closing(requests.get(download_link, stream=True)) as response:
                chunk_size = 1024  # maximum bytes fetched per chunk
                # Total size of the response body, for the progress bar.
                content_size = int(response.headers['content-length'])
                bar = ProgressBar(prefix + ' Get',
                                  file_name.strip(), content_size)
                sub_data_bytes = b''
                for data in response.iter_content(chunk_size=chunk_size):
                    sub_data_bytes += data
                    bar.refresh(len(sub_data_bytes))
            # sub_data_bytes = requests.get(download_link, timeout=10).content
        except requests.Timeout:
            return None, None, 'false'
        # Infer the archive type from the download link.
        if 'rar' in download_link:
            datatype = '.rar'
        elif 'zip' in download_link:
            datatype = '.zip'
        elif '7z' in download_link:
            datatype = '.7z'
        else:
            datatype = 'Unknown'
        return datatype, sub_data_bytes, ''
| 37.184932 | 97 | 0.504697 |
d24ad9c8ecbe3259091b402be1e94a52509fd928 | 5,024 | py | Python | tests/unit/test_ecsclient.py | lipak2345/python-ecsclient | d3f61497348f7c89c760c036d010aa6ca1317c22 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_ecsclient.py | lipak2345/python-ecsclient | d3f61497348f7c89c760c036d010aa6ca1317c22 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_ecsclient.py | lipak2345/python-ecsclient | d3f61497348f7c89c760c036d010aa6ca1317c22 | [
"Apache-2.0"
] | null | null | null | import unittest
import mock
from ecsclient import baseclient
from ecsclient import v2, v3
from ecsclient.client import Client
from ecsclient.common.exceptions import ECSClientException
class TestEcsClient(unittest.TestCase):
def test_verify_attributes(self):
c = baseclient.Client(username='someone',
password='password',
ecs_endpoint='http://127.0.0.1:4443',
token_endpoint='http://127.0.0.1:4443/login')
attributes = ['token_endpoint',
'username',
'password',
'token',
'ecs_endpoint',
'verify_ssl',
'token_path',
'request_timeout',
'cache_token',
'_session',
'_token_request',
'authentication']
for attr in attributes:
self.assertTrue(hasattr(c, attr))
def test_client_without_version(self):
with self.assertRaises(RuntimeError) as error:
Client(username='user',
password='password',
ecs_endpoint='https://192.168.1.10',
token_endpoint='https://192.168.10/login')
exception = error.exception
self.assertIn('Please provide the API version', str(exception))
def test_client_unsupported_version(self):
with self.assertRaises(RuntimeError) as error:
Client(version='10',
username='user',
password='password',
ecs_endpoint='https://192.168.1.10',
token_endpoint='https://192.168.10/login')
exception = error.exception
self.assertEqual("No client available for version '10'", str(exception))
def test_client_without_ecs_endpoint(self):
with self.assertRaises(ECSClientException) as error:
Client(version='3',
username='user',
password='password',
token_endpoint='https://192.168.10/login')
exception = error.exception.message
self.assertEqual("Missing 'ecs_endpoint'", str(exception))
@mock.patch('ecsclient.baseclient.os.path.isfile')
def test_client_without_token_endpoint(self, mock_isfile):
mock_isfile.return_value = False
with self.assertRaises(ECSClientException) as error:
Client(version='3',
username='user',
password='password',
ecs_endpoint='https://192.168.1.10')
exception = error.exception.message
mock_isfile.assert_called_with('/tmp/ecsclient.tkn')
self.assertEqual("'token_endpoint' not provided and missing 'token'|'token_path'", str(exception))
def test_client_without_credentials(self):
with self.assertRaises(ECSClientException) as error:
Client(version='3',
ecs_endpoint='https://192.168.1.10',
token_endpoint='https://192.168.10/login')
exception = error.exception.message
self.assertEqual("'token_endpoint' provided but missing ('username','password')", str(exception))
def test_client_v3_class(self):
c = Client(version='3',
username='user',
password='password',
ecs_endpoint='https://192.168.1.10',
token_endpoint='https://192.168.10/login')
self.assertIsInstance(c, v3.client.Client, 'Instance is not a v3 client class')
def test_client_v2_class(self):
c = Client(version='2',
username='user',
password='password',
ecs_endpoint='https://192.168.1.10',
token_endpoint='https://192.168.10/login')
self.assertIsInstance(c, v2.client.Client, 'Instance is not a v2 client class')
def test_client_init_with_credentials(self):
c = Client(version='3',
username='user',
password='password',
token_endpoint='https://192.168.10/login',
ecs_endpoint='https://192.168.1.10')
self.assertTrue(hasattr(c, 'username'))
self.assertTrue(hasattr(c, 'password'))
self.assertTrue(hasattr(c, 'token_endpoint'))
def test_client_init_with_token(self):
c = Client(version='3',
token='1234567890',
ecs_endpoint='https://192.168.1.10')
self.assertTrue(hasattr(c, 'token'))
@mock.patch('ecsclient.baseclient.os.path.isfile')
def test_client_init_with_token_path(self, mock_isfile):
mock_isfile.return_value = True
c = Client(version='3',
token_path='/tmp/mytoken.tkn',
ecs_endpoint='https://192.168.1.10')
self.assertTrue(hasattr(c, 'token_path'))
mock_isfile.assert_called_with('/tmp/mytoken.tkn')
| 41.866667 | 106 | 0.572452 |
b19e5db315dfdf22f0ff0984ca7a84481162ad5e | 2,815 | py | Python | tests/fireworks/user_objects/firetasks/test_ssh_tasks.py | IMTEK-Simulation/imteksimfw | 887275920ea864b02ae5149a7c85c656ab5f34b6 | [
"MIT"
] | null | null | null | tests/fireworks/user_objects/firetasks/test_ssh_tasks.py | IMTEK-Simulation/imteksimfw | 887275920ea864b02ae5149a7c85c656ab5f34b6 | [
"MIT"
] | null | null | null | tests/fireworks/user_objects/firetasks/test_ssh_tasks.py | IMTEK-Simulation/imteksimfw | 887275920ea864b02ae5149a7c85c656ab5f34b6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""Test dtool integration.
To see verbose logging during testing, run something like
import logging
import unittest
from imteksimfw.fireworks.user_objects.firetasks.tests.test_dtool_tasks import DtoolTasksTest
logging.basicConfig(level=logging.DEBUG)
suite = unittest.TestSuite()
suite.addTest(DtoolTasksTest('name_of_the_desired_test'))
runner = unittest.TextTestRunner()
runner.run(suite)
"""
__author__ = 'Johannes Laurin Hoermann'
__copyright__ = 'Copyright 2020, IMTEK Simulation, University of Freiburg'
__email__ = 'johannes.hoermann@imtek.uni-freiburg.de, johannes.laurin@gmail.com'
__date__ = 'May 7, 2020'
import logging
import unittest
import os
import tempfile
import threading
import time
import mockssh
# needs dtool cli for verification
from imteksimfw.fireworks.user_objects.firetasks.ssh_tasks import SSHForwardTask
module_dir = os.path.abspath(os.path.dirname(__file__))
class SSHTasksTest(unittest.TestCase):
def setUp(self):
# logger = logging.getLogger(__name__)
self._tmpdir = tempfile.TemporaryDirectory()
self._previous_working_directory = os.getcwd()
os.chdir(self._tmpdir.name)
self.default_ssh_forward_task_spec = {
'remote_host': 'localhost',
'remote_port': 80,
'ssh_host': 'localhost',
'ssh_user': 'testuser',
'local_port': None, # automatic allocation
'port_file': '.port',
# 'ssh_port': 22,
'ssh_keyfile': os.path.join(module_dir, "id_rsa"),
}
self.default_ssh_user = {'testuser': os.path.join(module_dir, "id_rsa")}
def tearDown(self):
os.chdir(self._previous_working_directory)
self._tmpdir.cleanup()
def test_ssh_forward_task_run(self):
"""Establish ssh forwarding via mock ssh server."""
logger = logging.getLogger(__name__)
with mockssh.Server(self.default_ssh_user) as s:
ssh_port = s.port
logger.info("Started mock ssh server for users '{}' at port {}."
.format(s.users, ssh_port))
logger.debug("Instantiate SSHForwardTask with '{}'".format(
{**self.default_ssh_forward_task_spec, 'ssh_port': ssh_port}))
t = SSHForwardTask(ssh_port=ssh_port, **self.default_ssh_forward_task_spec)
e = threading.Event()
t.set_stop_event(e)
thread = threading.Thread(target=t.run_task, kwargs={'fw_spec': {}})
thread.start()
logger.info("Started thread '{}.'".format(thread.name))
time.sleep(5)
logger.info("Send stop signal.")
e.set()
thread.join()
logger.info("Thread ended.")
if __name__ == '__main__':
unittest.main()
| 33.117647 | 97 | 0.653641 |
e70a29af57b6b5f1916a9656b15340fd38ecec58 | 59,065 | py | Python | pytorch_pretrained_bert/modeling_transfo_xl.py | z422684562/pytorch-pretrained-BERT | 5cd0a3db927b3b9232187674aad98d04f505cece | [
"Apache-2.0"
] | 12 | 2022-01-27T04:20:10.000Z | 2022-02-25T07:20:48.000Z | pytorch_pretrained_bert/modeling_transfo_xl.py | z422684562/pytorch-pretrained-BERT | 5cd0a3db927b3b9232187674aad98d04f505cece | [
"Apache-2.0"
] | null | null | null | pytorch_pretrained_bert/modeling_transfo_xl.py | z422684562/pytorch-pretrained-BERT | 5cd0a3db927b3b9232187674aad98d04f505cece | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
logger = logging.getLogger(__name__)
# Shortcut model name -> S3 URL of the pretrained PyTorch weights.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
# Shortcut model name -> S3 URL of the corresponding configuration file.
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
# Base name of TensorFlow checkpoint files consumed by load_tf_weights_in_transfo_xl.
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
    """ A map of modules from TF to PyTorch.
    This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.

    Args:
        model: a TransfoXLModel, or a TransfoXLLMHeadModel (detected via its
            ``transformer`` attribute), whose parameters will receive weights.
        config: the TransfoXLConfig describing the model layout.

    Returns:
        dict mapping TF checkpoint variable names to PyTorch parameters; the
        shared relative-position biases map to *lists* of parameters.
    """
    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update({
            "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
            "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
        for i, (out_l, proj_l, tie_proj) in enumerate(zip(
                model.crit.out_layers,
                model.crit.out_projs,
                config.tie_projs)):
            layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
            if config.tie_weight:
                # Output embeddings are tied to the input embeddings, so only
                # the per-cutoff bias comes from the checkpoint.
                tf_to_pt_map.update({
                    layer_str + 'b': out_l.bias})
            else:
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                tf_to_pt_map.update({
                    layer_str + 'lookup_table': out_l.weight,
                    layer_str + 'b': out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({
                    layer_str + 'proj': proj_l
                })
        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
        tf_to_pt_map.update({
            layer_str + 'lookup_table': embed_l.weight,
            layer_str + 'proj_W': proj_l
        })

    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = "transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
            layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
            layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
            layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
            layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
            layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
            layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
        })

    # Relative positioning biases
    if config.untie_r:
        # One (r_r, r_w) bias pair per layer; TF stores them stacked.
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        # Biases are shared across all layers.
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({
        'transformer/r_r_bias': r_r_list,
        'transformer/r_w_bias': r_w_list})
    return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model

    Args:
        model: the Transformer-XL PyTorch model to populate.
        config: its TransfoXLConfig (drives the name map).
        tf_path: path to the TensorFlow checkpoint.

    Returns:
        The same model, with weights copied from the checkpoint.

    Raises:
        ImportError: if TensorFlow / NumPy are not installed.
        AssertionError: if a mapped variable is missing or shapes mismatch.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if 'kernel' in name or 'proj' in name:
            # TF stores dense kernels transposed relative to nn.Linear.
            array = np.transpose(array)
        if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weigths
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the consumed variable (and its Adam optimizer slots) so the
        # final report lists only genuinely unused checkpoint variables.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
class TransfoXLConfig(object):
    """Configuration class to store the configuration of a `TransfoXLModel`.

    Can be constructed either from explicit keyword arguments (first
    positional argument is the vocabulary size) or from a JSON file
    (first positional argument is its path).
    """
    def __init__(self,
                 vocab_size_or_config_json_file=267735,
                 cutoffs=(20000, 40000, 200000),  # tuple: immutable default
                 d_model=1024,
                 d_embed=1024,
                 n_head=16,
                 d_head=64,
                 d_inner=4096,
                 div_val=4,
                 pre_lnorm=False,
                 n_layer=18,
                 tgt_len=128,
                 ext_len=0,
                 mem_len=1600,
                 clamp_len=1000,
                 same_length=True,
                 proj_share_all_but_first=True,
                 attn_type=0,
                 sample_softmax=-1,
                 adaptive=True,
                 tie_weight=True,
                 dropout=0.1,
                 dropatt=0.0,
                 untie_r=True,
                 init="normal",
                 init_range=0.01,
                 proj_init_std=0.01,
                 init_std=0.02):
        """Constructs TransfoXLConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `TransfoXLModel` or a configuration json file.
            cutoffs: cutoffs for the adaptive softmax
            d_model: Dimensionality of the model's hidden states.
            d_embed: Dimensionality of the embeddings
            d_head: Dimensionality of the model's heads.
            div_val: divident value for adapative input and softmax
            pre_lnorm: apply LayerNorm to the input instead of the output
            d_inner: Inner dimension in FF
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            tgt_len: number of tokens to predict
            ext_len: length of the extended context
            mem_len: length of the retained previous heads
            same_length: use the same attn length for all tokens
            proj_share_all_but_first: True to share all but first projs, False not to share.
            attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
            clamp_len: use the same pos embeddings after clamp_len
            sample_softmax: number of samples in sampled softmax
            adaptive: use adaptive softmax
            tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            dropatt: The dropout ratio for the attention probabilities.
            untie_r: untie relative position biases
            init: parameter initializer to use
            init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: parameters initialized by N(0, init_std)
            init_std: parameters initialized by N(0, init_std)

        Raises:
            ValueError: if the first argument is neither an int (vocab size)
                nor a str (path to a JSON config file).
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # Load every attribute verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            # Copy the cutoffs so the default is never shared/mutated.
            self.cutoffs = []
            self.cutoffs.extend(cutoffs)
            self.tie_weight = tie_weight
            # tie_projs[i] tells whether softmax cluster i shares its
            # projection with the embedding; the head cluster never does.
            if proj_share_all_but_first:
                self.tie_projs = [False] + [True] * len(self.cutoffs)
            else:
                self.tie_projs = [False] + [False] * len(self.cutoffs)
            self.d_model = d_model
            self.d_embed = d_embed
            self.d_head = d_head
            self.d_inner = d_inner
            self.div_val = div_val
            self.pre_lnorm = pre_lnorm
            self.n_layer = n_layer
            self.n_head = n_head
            self.tgt_len = tgt_len
            self.ext_len = ext_len
            self.mem_len = mem_len
            self.same_length = same_length
            self.attn_type = attn_type
            self.clamp_len = clamp_len
            self.sample_softmax = sample_softmax
            self.adaptive = adaptive
            self.dropout = dropout
            self.dropatt = dropatt
            self.untie_r = untie_r
            self.init = init
            self.init_range = init_range
            self.proj_init_std = proj_init_std
            self.init_std = init_std
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
        # Use `cls` (not the hard-coded class) so subclasses get instances
        # of themselves from this alternate constructor.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `TransfoXLConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())
class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional embedding used by Transformer-XL."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Inverse frequencies 1 / 10000^(2i/d); sines fill the first half
        # of the embedding, cosines the second half.
        exponents = torch.arange(0.0, demb, 2.0) / demb
        self.register_buffer('inv_freq', 1 / (10000 ** exponents))

    def forward(self, pos_seq, bsz=None):
        """Embed the 1-D position sequence.

        Returns a [len, 1, demb] tensor, expanded to [len, bsz, demb]
        along the batch dimension when ``bsz`` is given.
        """
        angles = torch.outer(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1).unsqueeze(1)
        if bsz is None:
            return emb
        return emb.expand(-1, bsz, -1)
class PositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward block with residual + LayerNorm."""

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # d_model -> d_inner -> d_model, dropout after each projection.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = LayerNorm(d_model)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        """Apply the FF block; LayerNorm before (pre_lnorm) or after it."""
        if self.pre_lnorm:
            # Normalize first; the residual path stays untouched.
            return inp + self.CoreNet(self.layer_norm(inp))
        # Post-norm: add the residual, then normalize the sum.
        return self.layer_norm(inp + self.CoreNet(inp))
class MultiHeadAttn(nn.Module):
    """Standard multi-head attention (no relative position terms).

    Queries come from the current segment; keys/values span the optional
    cached memory concatenated with the current segment. Supports pre- or
    post-LayerNorm placement.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 pre_lnorm=False, r_r_bias=None, r_w_bias=None):
        super(MultiHeadAttn, self).__init__()

        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout

        # Separate projections: q only sees the segment, k/v may also see mems.
        self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
        self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)

        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)

        self.layer_norm = LayerNorm(d_model)

        # 1/sqrt(d_head) attention scaling.
        self.scale = 1 / (d_head ** 0.5)

        self.pre_lnorm = pre_lnorm

        if r_r_bias is None or r_w_bias is None: # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias

    def forward(self, h, attn_mask=None, mems=None):
        """Attend over [mems; h] and return updated hidden states.

        Args:
            h: hidden states [hlen, bsz, d_model].
            attn_mask: optional mask, 2-D [qlen, klen] or 3-D [qlen, klen, bsz];
                True/1 entries are masked out.
            mems: optional cached states prepended for keys/values.

        Returns:
            Tensor of the same shape as ``h``.
        """
        ##### multihead attention
        # [hlen x bsz x n_head x d_head]

        if mems is not None:
            c = torch.cat([mems, h], 0)
        else:
            c = h

        if self.pre_lnorm:
            ##### layer normalization
            c = self.layer_norm(c)

        head_q = self.q_net(h)
        head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)

        head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
        head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
        head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)

        # [qlen x klen x bsz x n_head]
        attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
        attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))

        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)

        # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)

        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        if self.pre_lnorm:
            ##### residual connection
            output = h + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(h + attn_out)

        return output
class RelMultiHeadAttn(nn.Module):
    """Base class for relative-position multi-head attention variants.

    Holds the fused QKV projection, output projection, LayerNorm and the
    relative-shift helpers; subclasses implement ``forward``.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
                 r_r_bias=None, r_w_bias=None):
        super(RelMultiHeadAttn, self).__init__()

        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout

        # Fused projection producing queries, keys and values in one matmul.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)

        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)

        self.layer_norm = LayerNorm(d_model)

        # 1/sqrt(d_head) attention scaling.
        self.scale = 1 / (d_head ** 0.5)

        self.pre_lnorm = pre_lnorm

        if r_r_bias is None or r_w_bias is None: # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias

    def _parallelogram_mask(self, h, w, left=False):
        """Build an h x w byte mask shaped as a parallelogram band."""
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m,:m] = torch.triu(mask[:m,:m])
        mask[-m:,-m:] = torch.tril(mask[-m:,-m:])

        if left:
            return mask
        else:
            return mask.flip(0)

    def _shift(self, x, qlen, klen, mask, left=False):
        """Shift ``x`` along the key axis under ``mask`` (pad + masked-select)."""
        if qlen > 1:
            zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
                                   device=x.device, dtype=x.dtype)
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)

        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)

        x = x_padded.masked_select(mask[:,:,None,None]) \
                    .view(qlen, klen, x.size(2), x.size(3))

        return x

    def _rel_shift(self, x, zero_triu=False):
        """Re-index scores from (query, relative-distance) to (query, key).

        Uses the standard pad-reshape-slice trick: prepend a zero column,
        view with the first two dims swapped (+1), then drop the first row.
        """
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)

        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)

        x = x_padded[1:].view_as(x)

        if zero_triu:
            # Optionally zero out the upper triangle of the shifted scores.
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]

        return x

    def forward(self, w, r, attn_mask=None, mems=None):
        # Implemented by subclasses.
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Transformer-XL relative attention (attn_type 0).

    Combines sinusoidal relative position encodings (projected by
    ``r_net``) with the learned global biases ``r_w_bias``/``r_r_bias``.
    """
    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)

        # Projection applied to the relative position encodings r.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)

    def forward(self, w, r, attn_mask=None, mems=None):
        """Attend over [mems; w] using relative position encodings ``r``.

        Args:
            w: hidden states [qlen, bsz, d_model].
            r: relative position encodings [rlen, 1, d_model].
            attn_mask: optional mask, 2-D [qlen, klen] or 3-D [qlen, klen, bsz].
            mems: optional cached states prepended for keys/values.

        Returns:
            Updated hidden states [qlen, bsz, d_model].
        """
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)

        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries only for the current segment; keys/values span mems too.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)

        klen = w_head_k.size(0)

        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head

        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)                # qlen x n_head x d_head

        #### compute attention score
        # Terms follow the Transformer-XL paper: AC = content terms (a)+(c),
        # BD = position terms (b)+(d) after the relative shift.
        rw_head_q = w_head_q + self.r_w_bias                                    # qlen x bsz x n_head x d_head
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head

        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))              # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)

        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)

        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[None,:,:,None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[:,:,:,None], -1e30).type_as(attn_score)

        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)

        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))

        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)

        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)

        return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Relative attention with fully learnable position terms (Shaw et al. style).

    Instead of projecting sinusoidal encodings, receives learnable
    per-position embeddings and biases from the caller.
    """
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)

    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        """Attend over [mems; w] with learnable relative position terms.

        Args:
            w: hidden states [qlen, bsz, d_model].
            r_emb: [klen, n_head, d_head], used for term B
            r_w_bias: [n_head, d_head], used for term C
            r_bias: [klen, n_head], used for term D
            attn_mask: optional mask, 2-D [qlen, klen] or 3-D [qlen, klen, bsz].
            mems: optional cached states prepended for keys/values.

        Returns:
            Updated hidden states [qlen, bsz, d_model].
        """
        qlen, bsz = w.size(0), w.size(1)

        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)

            # Queries only for the current segment; keys/values span mems too.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)

        klen = w_head_k.size(0)

        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)

        if klen > r_emb.size(0):
            # Memory is longer than the available position embeddings:
            # pad by repeating the first (most distant) entry.
            r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
            r_emb = torch.cat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
            r_bias = torch.cat([r_bias_pad, r_bias], 0)
        else:
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]

        #### compute attention score
        rw_head_q = w_head_q + r_w_bias[None]                                   # qlen x bsz x n_head x d_head

        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))                  # qlen x klen x bsz x n_head
        D_ = r_bias[None, :, None]                                              # 1    x klen x 1   x n_head
        BD = self._rel_shift(B_ + D_)

        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)

        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))

        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)

        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))

        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)

        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)

        return output
class DecoderLayer(nn.Module):
    """Decoder block with absolute-position attention: self-attention + FF."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(DecoderLayer, self).__init__()
        self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, dec_attn_mask=None, mems=None):
        """Run masked self-attention (optionally over memory) then the FF block."""
        attended = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attended)
class RelLearnableDecoderLayer(nn.Module):
    """Decoder block using learnable relative position embeddings."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
                                                  **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
        """Relative self-attention (learnable embeddings/biases) then the FF block."""
        attended = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attended)
class RelPartialLearnableDecoderLayer(nn.Module):
    """Decoder block using Transformer-XL relative attention."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
                                                         d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
        """Relative self-attention with position encodings ``r``, then the FF block."""
        attended = self.dec_attn(dec_inp, r,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attended)
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding: the vocabulary is partitioned into frequency
    clusters by `cutoffs`; cluster i uses embedding size d_embed // div_val**i
    and is projected back up to `d_proj`. With div_val == 1 a single full-size
    table is used (plus a projection only when d_proj != d_embed).

    The output is scaled by sqrt(d_proj) (`emb_scale`).
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed

        # right-open cluster boundaries; append n_token so the last cluster
        # covers the tail of the vocabulary
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj

        self.emb_scale = d_proj ** 0.5

        self.cutoff_ends = [0] + self.cutoffs

        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)
            )
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))

    def forward(self, inp):
        """Embed integer token ids `inp` (any shape) -> [*inp.shape, d_proj]."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
                                   dtype=param.dtype, device=param.device)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]

                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                # BUG FIX: view(-1) instead of squeeze(). When a cluster
                # matches exactly one token, nonzero() is (1, 1) and squeeze()
                # yields a 0-dim tensor, which breaks index_select/index_copy_
                # (both require a 1-D index); view(-1) always stays 1-D.
                indices_i = mask_i.nonzero().view(-1)
                if indices_i.numel() == 0:
                    continue

                # shift token ids into the cluster-local id space
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])

                emb_flat.index_copy_(0, indices_i, emb_i)

            embed_shape = inp.size() + (self.d_proj,)
            embed = emb_flat.view(embed_shape)

        embed.mul_(self.emb_scale)

        return embed
class TransfoXLPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(TransfoXLPreTrainedModel, self).__init__()
        if not isinstance(config, TransfoXLConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_weight(self, weight):
        """Initialize *weight* in place per config.init: 'uniform' in
        [-init_range, init_range] or 'normal' with std config.init_std."""
        if self.config.init == 'uniform':
            nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
        elif self.config.init == 'normal':
            nn.init.normal_(weight, 0.0, self.config.init_std)

    def init_bias(self, bias):
        """Zero-initialize *bias* in place."""
        nn.init.constant_(bias, 0.0)

    def init_weights(self, m):
        """ Initialize the weights.
        """
        # Dispatches on the submodule's class name; intended to be used via
        # `self.apply(self.init_weights)`.
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                self.init_weight(m.weight)
            if hasattr(m, 'bias') and m.bias is not None:
                self.init_bias(m.bias)
        elif classname.find('AdaptiveEmbedding') != -1:
            # projection matrices get their own (usually smaller) std
            if hasattr(m, 'emb_projs'):
                for i in range(len(m.emb_projs)):
                    if m.emb_projs[i] is not None:
                        nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('Embedding') != -1:
            if hasattr(m, 'weight'):
                self.init_weight(m.weight)
        elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
            if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
                self.init_weight(m.cluster_weight)
            if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
                self.init_bias(m.cluster_bias)
            if hasattr(m, 'out_projs'):
                for i in range(len(m.out_projs)):
                    if m.out_projs[i] is not None:
                        nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('LayerNorm') != -1:
            if hasattr(m, 'weight'):
                # LayerNorm gain is centered on 1.0, not 0.0
                nn.init.normal_(m.weight, 1.0, self.config.init_std)
            if hasattr(m, 'bias') and m.bias is not None:
                self.init_bias(m.bias)
        elif classname.find('TransformerLM') != -1:
            # relative-position embeddings / biases owned by the LM itself
            if hasattr(m, 'r_emb'):
                self.init_weight(m.r_emb)
            if hasattr(m, 'r_w_bias'):
                self.init_weight(m.r_w_bias)
            if hasattr(m, 'r_r_bias'):
                self.init_weight(m.r_r_bias)
            if hasattr(m, 'r_bias'):
                self.init_bias(m.r_bias)

    def set_num_special_tokens(self, num_special_tokens):
        # Interface hook shared with other models in the library; a no-op here.
        pass

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `transfo-xl-wt103`
                - a path or url to a pretrained model archive containing:
                    . `transfo_xl_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
                - a path or url to a pretrained model archive containing:
                    . `transfo_xl_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific TransformerXL class
        """
        # Pull loader-only options out of kwargs so they are not forwarded to
        # the model constructor below.
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    archive_file, config_file))
            # NOTE(review): returns None on failure instead of raising;
            # callers must check for None.
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = TransfoXLConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load parameters, mirroring nn.Module.load_state_dict
            # but collecting problems instead of failing fast.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        # cope with checkpoints saved from the LM-head wrapper ('transformer.' prefix)
        if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
            start_prefix = 'transformer.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        # Make sure we are still sharing the input and output embeddings
        if hasattr(model, 'tie_weights'):
            model.tie_weights()
        return model
class TransfoXLModel(TransfoXLPreTrainedModel):
    """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    Transformer XL use a relative positioning (with sinusoidal patterns) and adaptive softmax inputs which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted to decreasing frequency.
    Params:
        config: a TransfoXLConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the token indices selected in the range [0, self.config.n_token[
        `mems`: optional memory of hidden states from previous forward passes
            as a list (num layers) of hidden states at the entry of each layer
            each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Outputs:
        A tuple of (last_hidden_state, new_mems)
        `last_hidden_state`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
    config = TransfoXLConfig()
    model = TransfoXLModel(config)
    last_hidden_state, new_mems = model(input_ids)
    # Another time on input_ids_next using the memory:
    last_hidden_state, new_mems = model(input_ids_next, new_mems)
    ```
    """
    def __init__(self, config):
        super(TransfoXLModel, self).__init__(config)
        self.n_token = config.n_token
        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
                                          div_val=config.div_val)
        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer
        self.tgt_len = config.tgt_len
        self.mem_len = config.mem_len
        self.ext_len = config.ext_len
        # maximum key length a layer can ever attend over
        self.max_klen = config.tgt_len + config.ext_len + config.mem_len
        self.attn_type = config.attn_type
        if not config.untie_r:
            # attention biases shared (tied) across all layers
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.layers = nn.ModuleList()
        if config.attn_type == 0: # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        elif config.attn_type == 1: # learnable embeddings
            for i in range(config.n_layer):
                self.layers.append(
                    RelLearnableDecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        elif config.attn_type in [2, 3]: # absolute embeddings
            for i in range(config.n_layer):
                self.layers.append(
                    DecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        self.same_length = config.same_length
        self.clamp_len = config.clamp_len
        if self.attn_type == 0: # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 1: # learnable
            self.r_emb = nn.Parameter(torch.Tensor(
                self.n_layer, self.max_klen, self.n_head, self.d_head))
            self.r_bias = nn.Parameter(torch.Tensor(
                self.n_layer, self.max_klen, self.n_head))
        elif self.attn_type == 2: # absolute standard
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 3: # absolute deeper SA
            self.r_emb = nn.Parameter(torch.Tensor(
                self.n_layer, self.max_klen, self.n_head, self.d_head))
        self.apply(self.init_weights)

    def backward_compatible(self):
        # older checkpoints predate the sample_softmax attribute
        self.sample_softmax = -1

    def reset_length(self, tgt_len, ext_len, mem_len):
        """Change target/extended-context/memory lengths after construction."""
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len

    def init_mems(self, data):
        """Create the initial (zero) memory: one [mem_len, bsz, d_model]
        tensor per layer, or None when mem_len == 0."""
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer):
                empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
                                    dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None

    def _update_mems(self, hids, mems, qlen, mlen):
        """Append this step's hidden states to the memory and re-trim it.

        Args:
            hids: list (num layers) of [qlen, bsz, d_model] hidden states
            mems: list (num layers) of [mlen, bsz, d_model] memories
            qlen: current query (input) length
            mlen: current memory length
        """
        # does not deal with None
        if mems is None: return None
        # mems is not None
        assert len(hids) == len(mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + max(0, qlen - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems

    def _forward(self, dec_inp, mems=None):
        """Core pass over [qlen, bsz] token ids; returns (core_out, new_mems)."""
        qlen, bsz = dec_inp.size()
        word_emb = self.word_emb(dec_inp)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        if self.same_length:
            # every position attends over a context of the same length
            all_ones = word_emb.new_ones(qlen, klen)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1+mlen)
                             + torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None]  # -1
        else:
            # standard causal mask shifted by the memory length
            dec_attn_mask = torch.triu(
                word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
        hids = []
        if self.attn_type == 0: # default
            pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
        elif self.attn_type == 1: # learnable
            core_out = self.drop(word_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                if self.clamp_len > 0:
                    r_emb = self.r_emb[i][-self.clamp_len :]
                    r_bias = self.r_bias[i][-self.clamp_len :]
                else:
                    r_emb, r_bias = self.r_emb[i], self.r_bias[i]
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, r_emb, self.r_w_bias[i],
                                 r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
        elif self.attn_type == 2: # absolute
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb + pos_emb[-qlen:])
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                if mems_i is not None and i == 0:
                    mems_i += pos_emb[:mlen]
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
        elif self.attn_type == 3:
            core_out = self.drop(word_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                if mems_i is not None and mlen > 0:
                    cur_emb = self.r_emb[i][:-qlen]
                    cur_size = cur_emb.size(0)
                    if cur_size < mlen:
                        cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
                        cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
                    else:
                        cur_emb = cur_emb[-mlen:]
                    mems_i += cur_emb.view(mlen, 1, -1)
                core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
        core_out = self.drop(core_out)

        # BUG FIX: arguments must match _update_mems(hids, mems, qlen, mlen).
        # They were previously passed as (mlen, qlen), i.e. swapped; that is
        # only coincidentally equivalent when ext_len == 0, and computed the
        # wrong memory window whenever ext_len > 0.
        new_mems = self._update_mems(hids, mems, qlen, mlen)

        return core_out, new_mems

    def forward(self, input_ids, mems=None):
        """ Params:
            input_ids :: [bsz, len]
            mems :: optional mems from previous forward passes (or init_mems)
                list (num layers) of mem states at the entry of each layer
                shape :: [self.config.mem_len, bsz, self.config.d_model]
                Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
        Returns:
            tuple (last_hidden, new_mems) where:
                new_mems: list (num layers) of mem states at the entry of each layer
                    shape :: [self.config.mem_len, bsz, self.config.d_model]
                last_hidden: output of the last layer:
                    shape :: [bsz, len, self.config.d_model]
        """
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        input_ids = input_ids.transpose(0, 1).contiguous()
        if mems is None:
            mems = self.init_mems(input_ids)
        last_hidden, new_mems = self._forward(input_ids, mems=mems)
        # We transpose back here to shape [bsz, len, hidden_dim]
        last_hidden = last_hidden.transpose(0, 1).contiguous()
        return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    This model add an (adaptive) softmax head on top of the TransfoXLModel
    Transformer XL use a relative positioning (with sinusoidal patterns) and adaptive softmax inputs which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted to decreasing frequency.
    Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
    Params:
        config: a TransfoXLConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the token indices selected in the range [0, self.config.n_token[
        `target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with the target token indices selected in the range [0, self.config.n_token[
        `mems`: an optional memory of hidden states from previous forward passes
            as a list (num layers) of hidden states at the entry of each layer
            each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Outputs:
        A tuple of (last_hidden_state, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is None:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
            else:
                Negative log likelihood of shape [batch_size, sequence_length]
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
    config = TransfoXLConfig()
    model = TransfoXLModel(config)
    last_hidden_state, new_mems = model(input_ids)
    # Another time on input_ids_next using the memory:
    last_hidden_state, new_mems = model(input_ids_next, mems=new_mems)
    ```
    """
    def __init__(self, config):
        super(TransfoXLLMHeadModel, self).__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        # use sampled softmax
        if config.sample_softmax > 0:
            self.out_layer = nn.Linear(config.d_model, config.n_token)
            self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
        # use adaptive softmax (including standard softmax)
        else:
            self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
                                                    config.cutoffs, div_val=config.div_val)
        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Run this to be sure output and input (adaptive) softmax weights are tied """
        # sampled softmax
        if self.sample_softmax > 0:
            if self.config.tie_weight:
                self.out_layer.weight = self.transformer.word_emb.weight
        # adaptive softmax (including standard softmax)
        else:
            if self.config.tie_weight:
                # one output layer per embedding cluster
                for i in range(len(self.crit.out_layers)):
                    self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
            if self.config.tie_projs:
                for i, tie_proj in enumerate(self.config.tie_projs):
                    # div_val == 1: a single shared projection exists only when
                    # d_model != d_embed; otherwise each cluster has its own.
                    if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                    elif tie_proj and self.config.div_val != 1:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]

    def reset_length(self, tgt_len, ext_len, mem_len):
        # delegate to the underlying transformer
        self.transformer.reset_length(tgt_len, ext_len, mem_len)

    def init_mems(self, data):
        # delegate to the underlying transformer
        return self.transformer.init_mems(data)

    def forward(self, input_ids, target=None, mems=None):
        """ Params:
            input_ids :: [bsz, len]
            target :: [bsz, len]
        Returns:
            tuple(softmax_output, new_mems) where:
                new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
                softmax_output: output of the (adaptive) softmax:
                    if target is None:
                        log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else:
                        Negative log likelihood of shape :: [bsz, len]
        """
        bsz = input_ids.size(0)
        tgt_len = input_ids.size(1)
        last_hidden, new_mems = self.transformer(input_ids, mems)
        # only score the last tgt_len positions (here: all of them)
        pred_hid = last_hidden[:, -tgt_len:]
        if self.sample_softmax > 0 and self.training:
            # sampled softmax requires tied weights (logits come from word_emb)
            assert self.config.tie_weight
            logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
            softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
        else:
            softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
            if target is None:
                softmax_output = softmax_output.view(bsz, tgt_len, -1)
            else:
                softmax_output = softmax_output.view(bsz, tgt_len)
        # We transpose back
        return (softmax_output, new_mems)
| 42.401292 | 131 | 0.594701 |
7ebddf5061ff85632f7c6e48db13d8587cecc0b8 | 2,111 | py | Python | src/dist.py | Tradehunt/kotlin-web-site | 5c2f88fb72130071746bde2c375acbb4182858c0 | [
"Apache-2.0"
] | 1,289 | 2015-01-17T23:02:12.000Z | 2022-03-31T07:05:05.000Z | src/dist.py | Tradehunt/kotlin-web-site | 5c2f88fb72130071746bde2c375acbb4182858c0 | [
"Apache-2.0"
] | 1,230 | 2015-01-04T08:16:08.000Z | 2022-03-25T00:00:42.000Z | src/dist.py | Tradehunt/kotlin-web-site | 5c2f88fb72130071746bde2c375acbb4182858c0 | [
"Apache-2.0"
] | 3,395 | 2015-01-02T20:45:03.000Z | 2022-03-30T21:01:15.000Z | from bs4 import BeautifulSoup
from os import path, walk
dist_path = path.join(path.dirname(__file__), "../", "dist")
def get_dist_page_content(url):
    """Return the raw content of the dist file behind *url* (directory URLs
    resolve to their index.html); raises when the file does not exist."""
    target = dist_path + url
    if url.endswith('/'):
        target += 'index.html'

    if not path.exists(target):
        raise Exception('Bad response during indexing')

    with open(target, 'r', encoding="UTF-8") as fh:
        return fh.read()
def get_dist_page_xml(url):
    """Parse the dist page at *url* into a BeautifulSoup tree (html.parser)."""
    return BeautifulSoup(get_dist_page_content(url), "html.parser")
def get_dist_page_type(url):
    """Classify the dist page at *url* for indexing.

    Rules are evaluated in order and the LAST matching rule wins, so the
    later, more specific checks override the generic ones. Returns None when
    nothing matches.
    """
    # every candidate is parsed, even non-HTML files
    soup = get_dist_page_xml(url)

    rules = [
        (url.endswith('/') or url.endswith('.html'), 'Page'),
        (url.startswith('community'), 'Page_Community'),
        (url.startswith('docs/reference'), 'Page_Reference'),
        (url.startswith('docs/tutorials'), 'Page_Tutorial'),
        (url.endswith('404.html'), 'Page_NotFound'),
        (url.startswith("/api/latest/"),
         "Page_API_stdlib" if "jvm/stdlib" in url else "Page_API_test"),
        (url.startswith("/spec/"), "Page_Spec"),
        (bool(soup.select_one("body[data-article-props]")), 'Page_Documentation'),
        (bool(soup.find("meta", {"http-equiv": "refresh"})), 'Redirect'),
        (url.endswith('pdf'), 'File_Pdf'),
        (url.endswith('package-list') or url.endswith('index.yml'), 'File_Text'),
    ]

    page_type = None
    for matched, kind in rules:
        if matched:
            page_type = kind
    return page_type
def get_dist_pages():
    """Walk the dist folder and return a list of (url, page_type) tuples,
    one per file; 'index.html' suffixes are stripped from URLs."""
    pages = []
    if not path.isdir(dist_path):
        return pages

    for root, _dirnames, filenames in walk(dist_path):
        prefix = root[len(dist_path):] or "/"
        for name in filenames:
            url = path.join(prefix, name)
            if url.endswith('index.html'):
                url = url[:-10]
            pages.append((url, get_dist_page_type(url)))
    return pages
| 26.721519 | 85 | 0.604927 |
d02263946c9f25c359893e1766810d8af4385d4f | 161 | py | Python | src/hist/numpy.py | amangoel185/hist | 040872b978ecfd98e9836ad0a7e5c27d8dd44d09 | [
"BSD-3-Clause"
] | 84 | 2020-02-12T02:02:58.000Z | 2022-03-23T10:50:03.000Z | src/hist/numpy.py | amangoel185/hist | 040872b978ecfd98e9836ad0a7e5c27d8dd44d09 | [
"BSD-3-Clause"
] | 213 | 2020-03-09T02:38:25.000Z | 2022-03-16T19:22:31.000Z | src/hist/numpy.py | amangoel185/hist | 040872b978ecfd98e9836ad0a7e5c27d8dd44d09 | [
"BSD-3-Clause"
] | 15 | 2020-03-14T12:05:18.000Z | 2021-11-12T14:25:07.000Z | from __future__ import annotations
from boost_histogram.numpy import histogram, histogram2d, histogramdd
__all__ = ("histogram", "histogram2d", "histogramdd")
| 26.833333 | 69 | 0.807453 |
57290b537faaf6ae5152b2f1e459f5ac314494fb | 10,993 | py | Python | onmt/Trainer.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | 1 | 2020-03-24T08:42:38.000Z | 2020-03-24T08:42:38.000Z | onmt/Trainer.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | null | null | null | onmt/Trainer.py | mingchen62/im2text-pytorch | 9516be1aad70517603383a92670c296f8d7e343e | [
"MIT"
] | null | null | null | from __future__ import division
"""
This is the loadable seq2seq trainer library that is
in charge of training details, loss compute, and statistics.
See train.py for a use case of this library.
Note!!! To make this a general library, we implement *only*
mechanism things here(i.e. what to do), and leave the strategy
things to users(i.e. how to do it). Also see train.py(one of the
users of this library) for the strategy things we do.
"""
import time
import sys
import math
import torch
import torch.nn as nn
import onmt
import onmt.io
import onmt.modules
class Statistics(object):
    """
    Accumulator for loss statistics.
    Currently calculates:

    * accuracy
    * perplexity
    * elapsed time
    """
    def __init__(self, loss=0, n_words=0, n_correct=0):
        # running sums over all batches folded in so far
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.n_src_words = 0
        self.start_time = time.time()

    def update(self, stat):
        """Fold another `Statistics` instance into this accumulator.

        NOTE: n_src_words is intentionally not accumulated here; callers
        add to it directly.
        """
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

    def accuracy(self):
        """Per-token accuracy in percent; 0.0 before any tokens are counted
        (previously this raised ZeroDivisionError on a fresh accumulator)."""
        if self.n_words == 0:
            return 0.0
        return 100 * (self.n_correct / self.n_words)

    def ppl(self):
        """Corpus perplexity exp(loss / n_words); the exponent is capped at
        100 to avoid overflow. Returns 1.0 before any tokens are counted
        (previously this raised ZeroDivisionError)."""
        if self.n_words == 0:
            return 1.0
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        """Seconds since this accumulator was created."""
        return time.time() - self.start_time

    def output(self, epoch, batch, n_batches, start):
        """Write out statistics to stdout.

        Args:
           epoch (int): current epoch
           batch (int): current batch
           n_batch (int): total batches
           start (int): start time of epoch.
        """
        t = self.elapsed_time()
        print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
               "%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed") %
              (epoch, batch, n_batches,
               self.accuracy(),
               self.ppl(),
               self.n_src_words / (t + 1e-5),
               self.n_words / (t + 1e-5),
               time.time() - start))
        sys.stdout.flush()

    def log(self, prefix, experiment, lr):
        """Log statistics to a crayon-style experiment object."""
        t = self.elapsed_time()
        experiment.add_scalar_value(prefix + "_ppl", self.ppl())
        experiment.add_scalar_value(prefix + "_accuracy", self.accuracy())
        experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t)
        experiment.add_scalar_value(prefix + "_lr", lr)

    def log_tensorboard(self, prefix, writer, lr, epoch):
        """Log statistics to a tensorboard SummaryWriter."""
        t = self.elapsed_time()
        values = {
            "ppl": self.ppl(),
            "accuracy": self.accuracy(),
            "tgtper": self.n_words / t,
            "lr": lr,
        }
        writer.add_scalars(prefix, values, epoch)
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.Model.NMTModel`): translation model to train
train_loss(:obj:`onmt.Loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.Loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.Optim.Optim`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
"""
    def __init__(self, model, train_loss, valid_loss, optim,
                 trunc_size=0, shard_size=32, data_type='text',
                 norm_method="sents", grad_accum_count=1):
        """Store the training components and switch *model* into train mode.

        See the class docstring for the meaning of each argument.
        """
        # Basic attributes.
        self.model = model
        self.train_loss = train_loss
        self.valid_loss = valid_loss
        self.optim = optim
        self.trunc_size = trunc_size
        self.shard_size = shard_size
        self.data_type = data_type
        self.norm_method = norm_method
        self.grad_accum_count = grad_accum_count

        # Gradient accumulation and truncated BPTT are mutually exclusive.
        assert(grad_accum_count > 0)
        if grad_accum_count > 1:
            assert(self.trunc_size == 0), \
                """To enable accumulated gradients,
                   you must disable target sequence truncating."""

        # Set model in training mode.
        self.model.train()
def train(self, train_iter, epoch, report_func=None):
""" Train next epoch.
Args:
train_iter: training data iterator
epoch(int): the epoch number
report_func(fn): function for logging
Returns:
stats (:obj:`onmt.Statistics`): epoch loss statistics
"""
total_stats = Statistics()
report_stats = Statistics()
idx = 0
true_batchs = []
accum = 0
normalization = 0
try:
add_on = 0
if len(train_iter) % self.grad_accum_count > 0:
add_on += 1
num_batches = len(train_iter) / self.grad_accum_count + add_on
except NotImplementedError:
# Dynamic batching
num_batches = -1
for i, batch in enumerate(train_iter):
cur_dataset = train_iter.get_cur_dataset()
self.train_loss.cur_dataset = cur_dataset
true_batchs.append(batch)
accum += 1
if self.norm_method == "tokens":
normalization += batch.tgt[1:].data.view(-1) \
.ne(self.train_loss.padding_idx).sum()
else:
normalization += batch.batch_size
if accum == self.grad_accum_count:
self._gradient_accumulation(
true_batchs, total_stats,
report_stats, normalization)
if report_func is not None:
report_stats = report_func(
epoch, idx, num_batches,
total_stats.start_time, self.optim.lr,
report_stats)
true_batchs = []
accum = 0
normalization = 0
idx += 1
if len(true_batchs) > 0:
self._gradient_accumulation(
true_batchs, total_stats,
report_stats, normalization)
true_batchs = []
return total_stats
def validate(self, valid_iter):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`onmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
for batch in valid_iter:
cur_dataset = valid_iter.get_cur_dataset()
self.valid_loss.cur_dataset = cur_dataset
src = onmt.io.make_features(batch, 'src', self.data_type)
if self.data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
tgt = onmt.io.make_features(batch, 'tgt')
# F-prop through the model.
outputs, attns, _ = self.model(src, tgt, src_lengths)
# Compute loss.
batch_stats = self.valid_loss.monolithic_compute_loss(
batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats
    def epoch_step(self, ppl, epoch):
        """Delegate the end-of-epoch learning-rate update to the optimizer.

        Args:
            ppl: validation perplexity/score passed to the decay schedule
            epoch (int): the epoch that just finished

        Returns:
            the result of the optimizer's ``update_learning_rate`` call.
        """
        return self.optim.update_learning_rate(ppl, epoch)
def drop_checkpoint(self, opt, epoch, fields, valid_stats):
""" Save a resumable checkpoint.
Args:
opt (dict): option object
epoch (int): epoch number
fields (dict): fields and vocabulary
valid_stats : statistics of last validation run
"""
real_model = (self.model.module
if isinstance(self.model, nn.DataParallel)
else self.model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': onmt.io.save_fields_to_vocab(fields),
'opt': opt,
'epoch': epoch,
'optim': self.optim,
}
torch.save(checkpoint,
'%s_acc_%.2f_ppl_%.2f_e%d.pt'
% (opt.save_model, valid_stats.accuracy(),
valid_stats.ppl(), epoch))
    def _gradient_accumulation(self, true_batchs, total_stats,
                               report_stats, normalization):
        """Process one accumulation group of batches and step the optimizer.

        With grad_accum_count > 1, gradients are zeroed once before the
        group and the optimizer steps once after it; with a count of 1,
        zeroing and stepping happen per (truncated) segment instead.

        Args:
            true_batchs: list of batches forming one accumulation group
            total_stats: epoch-level Statistics, updated in place
            report_stats: report-window Statistics, updated in place
            normalization: loss-normalisation factor for this group
        """
        if self.grad_accum_count > 1:
            self.model.zero_grad()
        for batch in true_batchs:
            target_size = batch.tgt.size(0)
            # Truncated BPTT: split the target into trunc_size segments;
            # trunc_size == 0 means the whole sequence in one segment.
            if self.trunc_size:
                trunc_size = self.trunc_size
            else:
                trunc_size = target_size
            dec_state = None
            src = onmt.io.make_features(batch, 'src', self.data_type)
            if self.data_type == 'text':
                _, src_lengths = batch.src
                report_stats.n_src_words += src_lengths.sum()
            else:
                src_lengths = None
            tgt_outer = onmt.io.make_features(batch, 'tgt')
            for j in range(0, target_size-1, trunc_size):
                # 1. Create truncated target.
                tgt = tgt_outer[j: j + trunc_size]
                # 2. F-prop all but generator.
                if self.grad_accum_count == 1:
                    self.model.zero_grad()
                outputs, attns, dec_state = \
                    self.model(src, tgt, src_lengths, dec_state)
                # 3. Compute loss in shards for memory efficiency.
                batch_stats = self.train_loss.sharded_compute_loss(
                    batch, outputs, attns, j,
                    trunc_size, self.shard_size, normalization)
                # 4. Update the parameters and statistics.
                if self.grad_accum_count == 1:
                    self.optim.step()
                total_stats.update(batch_stats)
                report_stats.update(batch_stats)
                # If truncated, don't backprop fully.
                if dec_state is not None:
                    dec_state.detach()
        if self.grad_accum_count > 1:
            self.optim.step()
| 34.139752 | 79 | 0.557537 |
c6866d315765361e3787f4cbb5e12217d4cdfeaf | 1,467 | py | Python | kay/ext/appstats/middleware.py | Letractively/kay-framework | a4cfabe3497e13c3785e5ec381b9cff11a378df3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-13T06:56:22.000Z | 2020-09-13T06:56:22.000Z | kay/ext/appstats/middleware.py | Letractively/kay-framework | a4cfabe3497e13c3785e5ec381b9cff11a378df3 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | kay/ext/appstats/middleware.py | Letractively/kay-framework | a4cfabe3497e13c3785e5ec381b9cff11a378df3 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2.5
# -*- coding:utf-8 -*-
"""
AppStatsMiddleware adapted to Kay framework.
:Copyright: (c) 2010 Ian Lewis <ianmlewis@gmail.com>,
:license: BSD, see LICENSE for more details.
"""
from kay.conf import settings
class AppStatsMiddleware(object):
  """Enable App Engine appstats recording around each request.

  Port of the AppstatsDjangoMiddleware shipped with the App Engine SDK;
  recording can be toggled at runtime through Kay's live settings
  extension when it is installed.
  """

  def _record_ok(self, request):
    # Without the live-settings extension, always record.
    if 'kay.ext.live_settings' not in settings.INSTALLED_APPS:
      return True
    # Otherwise the "kay.ext.appstats.middleware" switch decides; the
    # decision is cached on the request for process_response.
    from kay.ext.live_settings import live_settings
    record_ok = live_settings.get("kay.ext.appstats.middleware", "on")
    request._appstats_record = (record_ok.lower() == "on")
    return request._appstats_record

  def process_request(self, request):
    """Called by Kay before deciding which view to execute."""
    if self._record_ok(request):
      from google.appengine.ext.appstats.recording import start_recording
      start_recording()

  def process_response(self, request, response):
    """Stop recording; forward FirePython extension data when available."""
    if getattr(request, '_appstats_record', True):
      from google.appengine.ext.appstats.recording import end_recording
      extension_data = getattr(
          request, 'firepython_set_extension_data', None)
      end_recording(response.status_code, extension_data)
    return response
| 28.211538 | 73 | 0.70893 |
a9e637161e36af87c923e992209cab10dc40092b | 868 | py | Python | tree_bark_synthesis/TextureByNumbers/init/chamfer_distance_transformation.py | laitoch/tree-bark-synthesis | 0bd43d6699d2e05f62d144f310874f986bbd91d2 | [
"MIT"
] | null | null | null | tree_bark_synthesis/TextureByNumbers/init/chamfer_distance_transformation.py | laitoch/tree-bark-synthesis | 0bd43d6699d2e05f62d144f310874f986bbd91d2 | [
"MIT"
] | null | null | null | tree_bark_synthesis/TextureByNumbers/init/chamfer_distance_transformation.py | laitoch/tree-bark-synthesis | 0bd43d6699d2e05f62d144f310874f986bbd91d2 | [
"MIT"
] | null | null | null | import numpy as np
from neighborhood import *
def _surround_with_border(array_2d, size, fill_value):
row = np.full([size, array_2d.shape[1]+2*size], fill_value)
col = np.full([array_2d.shape[0], size], fill_value)
return np.block([[row],[col,array_2d,col],[row]])
def chamfer_distance_transformation(image):
    """Two-pass chamfer distance transform -- DT3,4 sequential algorithm
    [Fan citation [20]].

    Weights (4,3,4,3) are applied to the four causal/anti-causal
    neighbours (presumably diagonal steps cost 4 and axial steps cost 3;
    depends on the ordering returned by the ``neighborhood`` helpers --
    TODO confirm).  The image is first surrounded by a 255 border acting
    as a large sentinel; assumes distances stay below 255 -- TODO confirm
    for large inputs.
    """
    image = _surround_with_border(image, 1, 255)
    # Forward pass: top-left to bottom-right over the upper-left mask
    # (last coefficient 0 keeps the centre pixel itself as a candidate).
    coefs = np.array((4,3,4,3,0))
    for i in range(1, image.shape[0]-1):
        for j in range(1, image.shape[1]-1):
            image[i,j] = np.min(neighborhood_lm(image,i,j,3) + coefs)
    # Backward pass: bottom-right to top-left over the mirrored mask.
    coefs = np.array((0,3,4,3,4))
    for i in range(image.shape[0]-2, 0, -1):
        for j in range(image.shape[1]-2, 0, -1):
            image[i,j] = np.min(neighborhood_mr(image,i,j,3) + coefs)
    # Strip the sentinel border before returning.
    image = image[1:-1, 1:-1]
    return image
| 33.384615 | 69 | 0.624424 |
8ad6b57d6a96a856f4f59f22acdfcbb82e0641b7 | 2,288 | py | Python | docs/conf.py | Zamy97/internship_5 | f0a037ac3bc4ee0d465c6bb8aefce5bfe13e9722 | [
"MIT"
] | null | null | null | docs/conf.py | Zamy97/internship_5 | f0a037ac3bc4ee0d465c6bb8aefce5bfe13e9722 | [
"MIT"
] | null | null | null | docs/conf.py | Zamy97/internship_5 | f0a037ac3bc4ee0d465c6bb8aefce5bfe13e9722 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
# On Read the Docs the build reads Django settings from the environment /
# .env file and disables the Docker-specific configuration; local builds
# use a throwaway SQLite database and the "local" settings module.
if os.getenv("READTHEDOCS", default=False) == "True":
    sys.path.insert(0, os.path.abspath(".."))
    os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
    os.environ["USE_DOCKER"] = "no"
else:
    sys.path.insert(0, os.path.abspath(".."))
    os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")

# Django must be initialised before Sphinx autodoc imports project modules.
django.setup()
# -- Project information -----------------------------------------------------
project = "internship_5"
copyright = """2020, Daniel Roy Greenfeld"""
author = "Daniel Roy Greenfeld"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 36.31746 | 79 | 0.667832 |
cc04122f7499096e1521e29648720c1dff47c279 | 5,806 | py | Python | src/networks.py | ajenningsfrankston/ICIAR2018 | b0e6a19ee31ab941113294b32055bd14deca35cd | [
"MIT"
] | 175 | 2018-01-20T04:20:47.000Z | 2022-03-14T02:09:22.000Z | src/networks.py | ajenningsfrankston/ICIAR2018 | b0e6a19ee31ab941113294b32055bd14deca35cd | [
"MIT"
] | 12 | 2018-04-28T15:24:50.000Z | 2020-10-01T10:48:55.000Z | src/networks.py | ajenningsfrankston/ICIAR2018 | b0e6a19ee31ab941113294b32055bd14deca35cd | [
"MIT"
] | 75 | 2018-03-17T01:29:19.000Z | 2022-03-30T04:24:52.000Z | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class BaseNetwork(nn.Module):
    """Common base for the patch-wise and image-wise classifiers.

    Stores a short identifier (used e.g. for checkpoint naming) and
    provides the shared weight-initialisation routine.
    """

    def __init__(self, name, channels=1):
        super(BaseNetwork, self).__init__()
        self._name = name
        self._channels = channels

    def name(self):
        """Return the network identifier, e.g. ``'pw1'``."""
        return self._name

    def initialize_weights(self):
        """He-initialise convolutions, unit-initialise batch norms and
        draw small Gaussian weights for linear layers."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
class PatchWiseNetwork(BaseNetwork):
    """Patch-level classifier mapping a 3-channel patch to 4 classes.

    Five convolutional stages; the first three end in a stride-2
    convolution (each halves the spatial resolution), and the trunk is
    closed by a 1x1 projection to ``channels`` maps.  The linear head
    expects 64x64 feature maps, which implies a 512x512 input patch.
    """

    def __init__(self, channels=1):
        super(PatchWiseNetwork, self).__init__('pw' + str(channels), channels)

        layers = []
        # Stages 1-3: two 3x3 conv units, then a 2x2/stride-2 unit.
        in_ch = 3
        for width in (16, 32, 64):
            layers += self._unit(in_ch, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=2, stride=2, padding=0)
            in_ch = width
        # Stages 4-5: three 3x3 conv units each, no downsampling.
        for width in (128, 256):
            layers += self._unit(in_ch, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=3, stride=1, padding=1)
            in_ch = width
        # 1x1 projection to the requested number of output maps.
        layers.append(nn.Conv2d(in_channels=256, out_channels=channels,
                                kernel_size=1, stride=1))
        self.features = nn.Sequential(*layers)

        self.classifier = nn.Sequential(
            nn.Linear(channels * 64 * 64, 4),
        )

        self.initialize_weights()

    @staticmethod
    def _unit(in_ch, out_ch, kernel, stride, padding):
        # One conv -> batch-norm -> ReLU triple.
        return [
            nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
                      kernel_size=kernel, stride=stride, padding=padding),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.classifier(x), dim=1)
class ImageWiseNetwork(BaseNetwork):
    """Image-level classifier over stacked patch-wise responses.

    Takes 12*channels input maps (presumably the patch-wise outputs of 12
    patches per image -- confirm against the training pipeline), applies
    two downsampling stages plus a 1x1 projection to a single 16x16 map,
    and classifies it with a small dropout-regularised MLP head.
    """

    def __init__(self, channels=1):
        super(ImageWiseNetwork, self).__init__('iw' + str(channels), channels)

        layers = []
        in_ch = 12 * channels
        # Two stages: two 3x3 conv units, then a 2x2/stride-2 unit.
        for width in (64, 128):
            layers += self._unit(in_ch, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=3, stride=1, padding=1)
            layers += self._unit(width, width, kernel=2, stride=2, padding=0)
            in_ch = width
        # Collapse the 128 maps into a single response map.
        layers.append(nn.Conv2d(in_channels=128, out_channels=1,
                                kernel_size=1, stride=1))
        self.features = nn.Sequential(*layers)

        # MLP head: 256 -> 128 -> 128 -> 64 -> 4 with dropout in between.
        head = []
        in_f = 1 * 16 * 16
        for width in (128, 128, 64):
            head += [
                nn.Linear(in_f, width),
                nn.ReLU(inplace=True),
                nn.Dropout(0.5, inplace=True),
            ]
            in_f = width
        head.append(nn.Linear(64, 4))
        self.classifier = nn.Sequential(*head)

        self.initialize_weights()

    @staticmethod
    def _unit(in_ch, out_ch, kernel, stride, padding):
        # One conv -> batch-norm -> ReLU triple.
        return [
            nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
                      kernel_size=kernel, stride=stride, padding=padding),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.classifier(x), dim=1)
| 35.839506 | 102 | 0.568033 |
149fa0d5e891c6cba49b0c9c675c8355b73aefc8 | 13,079 | py | Python | backup/multiview - Copy 4.10.py | puhachov/Discovering-spammers-from-multiple-views | 0484552af19e68148bd7c29d3a726b4323c00834 | [
"MIT"
] | 1 | 2022-01-23T11:28:53.000Z | 2022-01-23T11:28:53.000Z | backup/multiview - Copy 4.10.py | puhachov/Discovering-spammers-from-multiple-views | 0484552af19e68148bd7c29d3a726b4323c00834 | [
"MIT"
] | null | null | null | backup/multiview - Copy 4.10.py | puhachov/Discovering-spammers-from-multiple-views | 0484552af19e68148bd7c29d3a726b4323c00834 | [
"MIT"
] | null | null | null | import numpy as np
import copy

import pandas as pd
import scipy
import scipy.linalg
from numpy.linalg import multi_dot
class multiview():
    """Semi-supervised multi-view NMF combined with a squared-hinge SVM.

    Each view matrix X[v] is factorised as U[v] * V[v]^T while every
    per-view factor (scaled by the diagonal matrix Q[v]) is pulled towards
    a shared consensus matrix V_star.  V_star doubles as the feature
    matrix of a linear SVM with weights W, so factorisation and
    classification are optimised jointly.

    Notation:
        U - base matrices (one per view)
        X - list of view matrices (numpy), one per view
        Y - label matrix, one row per sample

    Regularisation coefficients [[lambda_v], lambda_f, lambda_star_f, lambda_W]:
        - [lambda_v]: penalizes norm of the difference V*Q - V_star per view
        - lambda_f: penalizes sum of the squared norms of U and V
        - lambda_star_f: penalizes squared norm of matrix V_star
        - lambda_W: penalizes the SVM weight matrix W
    """

    # Constant parameters for the optimisation loops.
    _MAX_ITER = 10000
    _TOLERANCE = 1e-4
    _LARGE_NUMBER = 1e100

    def __init__(self, X, Y, U = None, V = None, num_components = None, view_weights = None, W = None):
        """
        Args:
            X: list of view matrices (numpy), one per view
            Y: ground-truth label matrix, one row per sample
            U, V: optional initial factor matrices (default: all-ones)
            num_components: latent dimensionality K (default 2)
            view_weights: optional initial view weights beta
            W: optional initial SVM weight matrix (default: zeros)
        """
        self.X_nv = [np.copy(X[v]) for v in range(len(X))]
        self.n_v = len(X)
        self.ground_truth = np.copy(Y)
        self.Y = np.copy(Y)

        if view_weights is None:
            self.beta = np.array([-np.log(5), np.log(3), np.log(3)])
        else:
            self.beta = view_weights

        if num_components is None:
            self.K = 2
        else:
            self.K = num_components

        if U is None:
            self.U = [np.ones((self.X_nv[v].shape[0], self.K)) for v in range(self.n_v)]
        else:
            self.U = U

        if V is None:
            self.V = [np.ones((self.X_nv[0].shape[1], self.K)) for v in range(self.n_v)]
        else:
            self.V = V

        self.V_star = np.random.random((self.X_nv[0].shape[1], self.K))
        self.Q = [None]*(self.n_v)

        # Regularisation coefficients and SGD settings are set in solve().
        self.lambda_v = None
        self.lambda_f = None
        self.lambda_star_f = None
        self.lambda_W = None
        self.alpha = None
        # FIX: the W argument used to be silently ignored; honour it when
        # provided (default behaviour unchanged).
        if W is None:
            self.W = np.zeros((2, self.K))
        else:
            self.W = W
        self.eta = None

    # ---------------------------------------------------------------- hinge
    @staticmethod
    def hinge_loss(z):
        """Smoothed (squared) hinge loss used by the SVM term."""
        if (z <= 0):
            return 1/2 - z
        elif (z >= 1):
            return 0
        else:
            return 1/2 * (1 - z)**2

    @staticmethod
    def hinge_loss_derivative(z):
        """Derivative of hinge_loss with respect to z."""
        if (z <= 0):
            return -1
        elif (z >= 1):
            return 0
        else:
            return z - 1

    # ------------------------------------------------- objective function
    # (the original banners read "self.ctive" -- sed damage of "objective")
    def _total_obj_func(self):
        """Evaluate the full objective O = O_M + O_SVM.

        Side effect: recomputes the diagonal scaling matrices Q[v] from the
        current U[v] (Q[v] holds the column sums of U[v]).
        """
        for v in range(self.n_v):
            diag_vector = [sum(self.U[v][:,i]) for i in range(self.K)]  # i - column index
            self.Q[v] = np.diag(diag_vector)

        # Multi-view factorisation term O_M.
        term_1 = [self.X_nv[v] - np.linalg.multi_dot([self.U[v],
                                                      np.linalg.inv(self.Q[v]),
                                                      self.Q[v],
                                                      np.transpose(self.V[v])])
                  for v in range (self.n_v)]
        term_1_norm = list(map(lambda X: scipy.linalg.norm(X, ord = 'fro')**2, term_1))
        term_2 = [self.V[v].dot(self.Q[v]) - self.V_star for v in range (self.n_v)]
        term_2_norm = list(map(lambda X: scipy.linalg.norm(X, ord = 'fro')**2, term_2))
        term_3 = self.lambda_star_f/2 * np.linalg.norm(self.V_star, ord = 'fro')
        term_4 = [np.linalg.norm(self.U[v], ord = 'fro')**2 + np.linalg.norm(self.V[v], ord = 'fro')**2 for v in range (self.n_v)]
        O_M = 1/2 * np.sum(self.beta * term_1_norm + self.lambda_v * term_2_norm) + self.lambda_star_f * term_3 +self.lambda_f/2 * np.sum(term_4)

        # SVM term: accumulated hinge loss over the labelled samples.
        l = self.Y.shape[0]
        S = 0
        for i in range(l):
            S += multiview.hinge_loss(self.Y[i,:].dot(self.W.dot(np.transpose(self.V_star[i,:]))))
        O_SVM = self.alpha * S + self.lambda_W/2 * np.linalg.norm(self.W, ord = 'fro')
        return O_M + O_SVM

    # ----------------------------------------------- optimise w.r.t. U, V
    def _optimize_towards_U_and_V(self):
        """Multiplicative updates of U[v] and V[v] until convergence.

        Returns:
            int: number of iterations performed.
        """
        iter_count = 0
        func_val_old = multiview._LARGE_NUMBER
        func_val = self._total_obj_func()
        U_old = copy.deepcopy(self.U)
        V_old = copy.deepcopy(self.V)

        while (iter_count < multiview._MAX_ITER)and (abs(func_val - func_val_old)/abs(func_val_old) > multiview._TOLERANCE):
            iter_count += 1
            func_val_old = func_val
            for v in range(self.n_v):
                # UPDATE U
                # TODO: the numerator/denominator are missing the V_star
                # coupling terms (coefficients A and B in the derivation).
                numerator_U = self.beta[v]*(self.X_nv[v].dot(self.V[v]))
                denominator_U = self.beta[v] * multi_dot([self.U[v], np.transpose(self.V[v]), self.V[v]])
                self.U[v] = U_old[v] * numerator_U/denominator_U

                # Normalise by ||U[v]||_F.
                # NOTE(review): after the first division ||U[v]||_F == 1, so
                # the V[v] rescaling below is a no-op -- confirm intent.
                self.U[v] = self.U[v]/scipy.linalg.norm(self.U[v], ord = 'fro')
                self.V[v] = self.V[v]/scipy.linalg.norm(self.U[v], ord = 'fro')

                # UPDATE V
                numerator_V = self.beta[v] * np.transpose(self.X_nv[v]).dot(self.U[v]) + self.lambda_v[v] * self.beta[v] * self.V_star
                denominator_V = self.beta[v] * multi_dot([self.V[v], np.transpose(self.U[v]), self.U[v]]) + self.lambda_v[v] * self.beta[v] * self.V[v] + self.lambda_f * self.V[v]
                self.V[v] = V_old[v] * numerator_V/denominator_V

                # Keep the current iterates for the next multiplicative step.
                V_old[v] = self.V[v]
                U_old[v] = self.U[v]

            func_val = self._total_obj_func()
            print("Iter: {}; Old Value {}; Current Value: {}".format(iter_count, func_val_old, func_val))
        return iter_count

    # ------------------------------------------ optimise w.r.t. V_star, W
    def _optimize_towards_V_star_and_W(self):
        """Stochastic-gradient updates of the consensus V_star and SVM W.

        Returns:
            int: number of iterations performed.
        """
        iter_count = 0
        func_val_old = multiview._LARGE_NUMBER
        func_val = self._total_obj_func()
        V_star_old = copy.deepcopy(self.V_star)
        W_old = copy.deepcopy(self.W)

        while (iter_count < multiview._MAX_ITER)and (abs(func_val - func_val_old)/abs(func_val_old) > multiview._TOLERANCE):
            iter_count += 1
            func_val_old = func_val
            W_der_sum = 0
            for i in range(self.Y.shape[0]):
                # Derivative of the objective w.r.t. row i of V_star.
                term_1 = 0
                for v in range(self.n_v):
                    term_1 += ( -self.lambda_v[v] * self.beta[v]) * (self.V[v][i,:].dot(self.Q[v]) - self.V_star[i,:])
                term_2 = self.alpha * multiview.hinge_loss_derivative(self.Y[i,:].dot(self.W).dot(np.transpose(self.V_star[i,:]))) * self.Y[i,:].dot(self.W)
                term_3 = self.lambda_star_f * self.V_star[i,:]
                derivative_V_star_i = term_1 + term_2 + term_3
                self.V_star[i,:] = V_star_old[i,:] - self.learning_rate * derivative_V_star_i

                # Accumulate the SVM weight gradient using the previous V_star.
                W_der_sum += multiview.hinge_loss_derivative(self.Y[i,:].dot(self.W).dot(np.transpose(V_star_old[i,:]))) * (self.Y[i,:]).reshape((2,1)).dot((V_star_old[i,:]).reshape((1,self.K)))
            derivative_W = self.alpha * W_der_sum + self.lambda_W * self.W

            # Update parameters for the next sweep.
            # NOTE(review): this aliases V_star_old to self.V_star (no copy),
            # so later in-place row updates read partially updated values --
            # confirm this Gauss-Seidel-style behaviour is intended.
            V_star_old = self.V_star
            self.W = W_old - self.learning_rate * derivative_W
            W_old = self.W
            func_val = self._total_obj_func()
            print("Iter: {}; Old Value {}; Current Value: {}".format(iter_count, func_val_old, func_val))
        return iter_count

    def _update_betas(self):
        """Re-weight views (Lagrange-multiplier update): views with lower
        reconstruction error receive a larger weight."""
        # FIX: the original computed the normalising sum with a shadowed loop
        # variable, which reduced every beta to the constant -log(1/n_v).
        # Compute each view's reconstruction error first, then normalise.
        errors = []
        for v in range(self.n_v):
            term_1 = self.X_nv[v] - multi_dot([self.U[v], np.linalg.inv(self.Q[v]), self.Q[v], np.transpose(self.V[v])])
            term_1_norm = scipy.linalg.norm(term_1, ord = 'fro')
            term_2 = self.lambda_v[v] * scipy.linalg.norm(self.V[v].dot(self.Q[v]) - self.V_star, ord = 'fro')
            errors.append(term_1_norm**2 + term_2**2)
        total = sum(errors)
        for v in range(self.n_v):
            self.beta[v] = -np.log(errors[v]/total)

    def solve(self, training_size, learning_rate, alpha, regularisation_coefficients = None):
        """Run the alternating optimisation.

        Args:
            training_size (float): fraction of samples used for training
            learning_rate (float): SGD step size for the V_star / W updates
            alpha (float): weight of the SVM loss term
            regularisation_coefficients: optional
                [[lambda_v], lambda_f, lambda_star_f, lambda_W]
        """
        # Set up the variable parameters of the algorithm.
        if (regularisation_coefficients is None):
            self.lambda_v = np.array([0.5, 0.5, 0.5])
            self.lambda_f = 0.2
            self.lambda_star_f = 0.1
            self.lambda_W = 0.3
        else:
            self.lambda_v = regularisation_coefficients[0]
            self.lambda_f = regularisation_coefficients[1]
            self.lambda_star_f = regularisation_coefficients[2]
            self.lambda_W = regularisation_coefficients[3]
        self.learning_rate = learning_rate
        self.eta = learning_rate
        self.alpha = alpha

        # Divide data into training and test set.
        self._training_size = int(self.ground_truth.shape[0] * training_size)
        self.Y = self.ground_truth[:self._training_size]
        self.Y_predict = np.zeros((self.ground_truth.shape[0] - self._training_size, 2))
        self.Y_test = self.ground_truth[self._training_size:,:]

        iter_count = 0
        iter_count_UV = multiview._LARGE_NUMBER
        iter_count_VW = multiview._LARGE_NUMBER
        func_val_old = multiview._LARGE_NUMBER
        # Evaluating the objective also (re)initialises the Q matrices.
        func_val = self._total_obj_func()

        # Alternate the block updates until both converge in a single pass.
        while (iter_count_UV + iter_count_VW > 2):
            iter_count += 1
            print("Iteration {}...\n".format(iter_count))
            print("Updating U and V...\n")
            iter_count_UV = self._optimize_towards_U_and_V()
            print("DONE updating U and V...\nUpdating V_star and W...\n\n")
            iter_count_VW = self._optimize_towards_V_star_and_W()
            print("DONE updating V_star and W...\nUpdating betas...\n\n")
            self._update_betas()
            print("Done updating betas...\n\n")
        print("-------------------------------------------------------")
        print("OPTIMISATION DONE")

    def evaluate(self):
        """Classify the held-out samples and compute evaluation metrics.

        Returns:
            (pd.DataFrame, float, float, float): confusion matrix,
            precision, recall and F1 score.
        """
        for i in range(self._training_size, self.ground_truth.shape[0]):
            # Predict the user's cohort from the combined SVM scores.
            w1_w2 = self.W.dot(np.transpose(self.V_star[i,:]))
            # FIX: rows used to be written at index (self._training_size - i),
            # i.e. in reverse order, misaligning predictions with Y_test.
            if (np.sum(w1_w2) < 0):
                self.Y_predict[i - self._training_size,:] = np.array([-1., 0.])
            else:
                self.Y_predict[i - self._training_size,:] = np.array([0., 1.])

        # Confusion matrix:  TP | FP
        #                    FN | TN
        TP = 0
        FP = 0
        FN = 0
        TN = 0
        for i in range(self.Y_predict.shape[0]):
            if (np.array_equal(self.Y_test[i,:], self.Y_predict[i,:]))and(np.array_equal(self.Y_predict[i,:], np.array([-1., 0.]))):
                TP += 1
            if (np.array_equal(self.Y_test[i,:], self.Y_predict[i,:]))and(np.array_equal(self.Y_predict[i,:], np.array([0., 1.]))):
                TN += 1
            if (np.array_equal(self.Y_test[i,:], np.array([-1., 0.])))and(np.array_equal(self.Y_predict[i,:], np.array([0., 1.]))):
                FP += 1
            if (np.array_equal(self.Y_test[i,:], np.array([0., 1.])))and(np.array_equal(self.Y_predict[i,:], np.array([-1., 0.]))):
                FN += 1
        confusion_matrix = pd.DataFrame(data = {'Actual_Spammer': [TP, FN], 'Actual_Legitimate': [FP, TN]}, index = ['Predicted_Spammer ','Predicted_Legitimate'])
        precision = TP/(TP+FP)
        recall = TP/(TP+FN)
        F1_score = 2*TP/(2*TP + FP + FN)
        return confusion_matrix, precision, recall, F1_score
| 41 | 203 | 0.514642 |
9fc65aae8e9b59a5150436e852cf53ea0ea408e9 | 11,423 | py | Python | openpype/lib/delivery.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | openpype/lib/delivery.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | openpype/lib/delivery.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | """Functions useful for delivery action or loader"""
import os
import shutil
import glob
import clique
import collections
def collect_frames(files):
    """Map each source path to its frame string, if it is part of a sequence.

    Uses clique as the most precise sequence-detection solution.

    Args:
        files(list): list of source paths

    Returns:
        (dict): {'/asset/subset_v001.0001.png': '0001', ....}
            The frame value is None for a single, non-sequence file.
    """
    # FIX: the assemble result used to be bound to a local named
    # ``collections``, shadowing the stdlib ``collections`` module that is
    # imported at file scope.
    sequences, remainder = clique.assemble(files, minimum_items=1)

    sources_and_frames = {}
    if sequences:
        for sequence in sequences:
            src_head = sequence.head
            src_tail = sequence.tail
            for index in sequence.indexes:
                src_frame = sequence.format("{padding}") % index
                src_file_name = "{}{}{}".format(src_head, src_frame,
                                                src_tail)
                sources_and_frames[src_file_name] = src_frame
    else:
        # NOTE(review): only a single remainder item is returned; if
        # ``files`` may contain several non-sequence paths the rest are
        # dropped -- confirm callers only pass one file in that case.
        sources_and_frames[remainder.pop()] = None

    return sources_and_frames
def sizeof_fmt(num, suffix='B'):
    """Format *num* with a binary (Ki/Mi/Gi/...) unit prefix."""
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything beyond Zi collapses into the Yi bucket.
    return "%.1f%s%s" % (value, 'Yi', suffix)
def path_from_representation(representation, anatomy):
    """Resolve a representation's filesystem path from its stored template.

    Returns None when no template is stored or when the template
    references context data that is unavailable.
    """
    from avalon import pipeline  # safer importing

    try:
        template = representation["data"]["template"]
    except KeyError:
        # No template stored on this representation.
        return None

    try:
        context = representation["context"]
        context["root"] = anatomy.roots
        path = pipeline.format_template_with_optional_keys(context, template)
    except KeyError:
        # Template references unavailable data
        return None

    return os.path.normpath(path.replace("/", "\\"))
def copy_file(src_path, dst_path):
    """Create *dst_path* from *src_path*, preferring a hard link to a copy.

    Does nothing when the destination already exists; falls back to a
    real copy when hard-linking fails (OSError, e.g. across filesystems).
    """
    from openpype.lib import create_hard_link  # safer importing

    if os.path.exists(dst_path):
        return

    try:
        create_hard_link(src_path, dst_path)
    except OSError:
        shutil.copyfile(src_path, dst_path)
def get_format_dict(anatomy, location_path):
    """Build the root-override mapping for delivery template formatting.

    Args:
        anatomy (Anatomy)
        location_path (str): user-provided delivery root; may be empty

    Returns:
        (dict): empty when no location was given, otherwise {"root": ...}
            where the value is either the path itself or a per-root-name
            mapping, depending on the delivery templates.
    """
    if not location_path:
        return {}

    normalized = location_path.replace("\\", "/")
    root_names = anatomy.root_names_from_templates(
        anatomy.templates["delivery"]
    )
    if root_names is None:
        return {"root": normalized}
    return {"root": {name: normalized for name in root_names}}
def check_destination_path(repre_id,
                           anatomy, anatomy_data,
                           datetime_data, template_name):
    """Try to fill the delivery template 'template_name' for one repre.

    When the path cannot be filled (unmatched keys or wrongly typed
    values) an error message is collected so the representation can be
    filtered out later.

    Args:
        repre_id (str): representation id shown in the report
        anatomy (Anatomy)
        anatomy_data (dict): context to fill anatomy; updated in place
            with *datetime_data*
        datetime_data (dict): values with actual date
        template_name (str): to pick correct delivery template

    Returns:
        (collections.defaultdict): {"TYPE_OF_ERROR": ["ERROR_DETAIL"]}
    """
    anatomy_data.update(datetime_data)
    anatomy_filled = anatomy.format_all(anatomy_data)
    dest_path = anatomy_filled["delivery"][template_name]
    report_items = collections.defaultdict(list)

    if dest_path.solved:
        return report_items

    msg = (
        "Missing keys in Representation's context"
        " for anatomy template \"{}\"."
    ).format(template_name)

    sub_msg = (
        "Representation: {}<br>"
    ).format(repre_id)

    if dest_path.missing_keys:
        keys = ", ".join(dest_path.missing_keys)
        sub_msg += (
            "- Missing keys: \"{}\"<br>"
        ).format(keys)

    if dest_path.invalid_types:
        invalid = [
            "\"{}\" {}".format(key, str(value))
            for key, value in dest_path.invalid_types.items()
        ]
        keys = ", ".join(invalid)
        sub_msg += (
            "- Invalid value DataType: \"{}\"<br>"
        ).format(keys)

    report_items[msg].append(sub_msg)
    return report_items
def process_single_file(
    src_path, repre, anatomy, template_name, anatomy_data, format_dict,
    report_items, log
):
    """Deliver (hardlink/copy) a single representation file.

    Args:
        src_path(str): path of source representation file
        repre (dict): full representation; only "_id" is used here, the
            rest of the signature mirrors process_sequence
        anatomy (Anatomy)
        template_name (string): user selected delivery template name
        anatomy_data (dict): data from repre to fill anatomy with
        format_dict (dict): root dictionary with names and values
        report_items (collections.defaultdict): collected error messages
        log (Logger): for log printing

    Returns:
        (collections.defaultdict, int): report and number of files copied
    """
    # Make sure the source path is valid for all platforms.
    src_path = os.path.normpath(src_path.replace("\\", "/"))
    if not os.path.exists(src_path):
        msg = "{} doesn't exist for {}".format(src_path, repre["_id"])
        report_items["Source file was not found"].append(msg)
        return report_items, 0

    # Resolve the destination path from the delivery template, optionally
    # re-rooted with the user-provided roots.
    anatomy_filled = anatomy.format(anatomy_data)
    template_result = anatomy_filled["delivery"][template_name]
    if format_dict:
        delivery_path = template_result.rootless.format(**format_dict)
    else:
        delivery_path = template_result

    # Backwards compatibility when extension contained `.`, then normalize
    # for the current platform.
    delivery_path = os.path.normpath(
        delivery_path.replace("..", ".").replace("\\", "/"))

    delivery_folder = os.path.dirname(delivery_path)
    if not os.path.exists(delivery_folder):
        os.makedirs(delivery_folder)

    log.debug("Copying single: {} -> {}".format(src_path, delivery_path))
    copy_file(src_path, delivery_path)
    return report_items, 1
def process_sequence(
    src_path, repre, anatomy, template_name, anatomy_data, format_dict,
    report_items, log
):
    """ For Pype2(mainly - works in 3 too) where representation might not
        contain files.

        Uses listing physical files (not 'files' on repre as a)might not be
        present, b)might not be reliable for representation and copying them.

        TODO Should be refactored when files are sufficient to drive all
        representations.

        Args:
            src_path(str): path of source representation file
            repre (dict): full representation
            anatomy (Anatomy)
            template_name (string): user selected delivery template name
            anatomy_data (dict): data from repre to fill anatomy with
            format_dict (dict): root dictionary with names and values
            report_items (collections.defaultdict): to return error messages
            log (Logger): for log printing

        Returns:
            (collections.defaultdict , int): report items and number of
                files actually copied
    """
    # Make sure path is valid for all platforms
    src_path = os.path.normpath(src_path.replace("\\", "/"))

    def hash_path_exist(myPath):
        # '#' frame-padding placeholders become glob wildcards, so the check
        # succeeds if ANY frame of the sequence exists on disk.
        res = myPath.replace('#', '*')
        glob_search_results = glob.glob(res)
        if len(glob_search_results) > 0:
            return True
        return False

    if not hash_path_exist(src_path):
        msg = "{} doesn't exist for {}".format(src_path,
                                               repre["_id"])
        report_items["Source file was not found"].append(msg)
        return report_items, 0

    delivery_templates = anatomy.templates.get("delivery") or {}
    delivery_template = delivery_templates.get(template_name)
    if delivery_template is None:
        msg = (
            "Delivery template \"{}\" in anatomy of project \"{}\""
            " was not found"
        ).format(template_name, anatomy.project_name)
        report_items[""].append(msg)
        return report_items, 0

    # Check if 'frame' key is available in template which is required
    # for sequence delivery
    if "{frame" not in delivery_template:
        msg = (
            "Delivery template \"{}\" in anatomy of project \"{}\""
            "does not contain '{{frame}}' key to fill. Delivery of sequence"
            " can't be processed."
        ).format(template_name, anatomy.project_name)
        report_items[""].append(msg)
        return report_items, 0

    dir_path, file_name = os.path.split(str(src_path))

    context = repre["context"]
    ext = context.get("ext", context.get("representation"))
    if not ext:
        msg = "Source extension not found, cannot find collection"
        report_items[msg].append(src_path)
        log.warning("{} <{}>".format(msg, context))
        return report_items, 0

    ext = "." + ext
    # context.representation could be .psd
    ext = ext.replace("..", ".")

    # Group physical files in the source folder into collections and pick the
    # one whose tail matches the representation's extension.
    src_collections, remainder = clique.assemble(os.listdir(dir_path))
    src_collection = None
    for col in src_collections:
        if col.tail != ext:
            continue
        src_collection = col
        break

    if src_collection is None:
        msg = "Source collection of files was not found"
        report_items[msg].append(src_path)
        log.warning("{} <{}>".format(msg, src_path))
        return report_items, 0

    # Placeholder substituted into the template, then split out again to get
    # the head/tail around the frame number.
    frame_indicator = "@####@"

    # NOTE(review): this mutates the caller's anatomy_data dict ('frame' key)
    # — confirm callers don't reuse the dict afterwards.
    anatomy_data["frame"] = frame_indicator
    anatomy_filled = anatomy.format(anatomy_data)

    if format_dict:
        template_result = anatomy_filled["delivery"][template_name]
        delivery_path = template_result.rootless.format(**format_dict)
    else:
        delivery_path = anatomy_filled["delivery"][template_name]

    delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))
    delivery_folder = os.path.dirname(delivery_path)
    dst_head, dst_tail = delivery_path.split(frame_indicator)
    # Destination keeps the source's frame padding.
    dst_padding = src_collection.padding
    dst_collection = clique.Collection(
        head=dst_head,
        tail=dst_tail,
        padding=dst_padding
    )

    if not os.path.exists(delivery_folder):
        os.makedirs(delivery_folder)

    src_head = src_collection.head
    src_tail = src_collection.tail
    uploaded = 0
    # Copy frame by frame, re-padding the index for source and destination.
    for index in src_collection.indexes:
        src_padding = src_collection.format("{padding}") % index
        src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
        src = os.path.normpath(
            os.path.join(dir_path, src_file_name)
        )

        dst_padding = dst_collection.format("{padding}") % index
        dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
        log.debug("Copying single: {} -> {}".format(src, dst))
        copy_file(src, dst)
        uploaded += 1

    return report_items, uploaded
| 33.206395 | 79 | 0.6163 |
4286b3afbac80f935b21128e9604c1fdb33a3600 | 1,132 | py | Python | style/utils.py | A-Grabish/py-style2 | 564dad9c954b541634a204d4ef4b54d937c0a520 | [
"MIT"
] | 23 | 2018-08-08T07:12:09.000Z | 2022-03-18T08:29:35.000Z | style/utils.py | A-Grabish/py-style2 | 564dad9c954b541634a204d4ef4b54d937c0a520 | [
"MIT"
] | 2 | 2019-02-21T17:27:58.000Z | 2019-04-29T17:29:45.000Z | style/utils.py | A-Grabish/py-style2 | 564dad9c954b541634a204d4ef4b54d937c0a520 | [
"MIT"
] | 5 | 2019-01-07T23:17:55.000Z | 2022-03-10T18:35:31.000Z | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
import numpy as np
def gallery(imgtuples, rows=1, cols=None, figsize=None):
cols = cols or int(math.ceil(len(imgtuples) / rows))
fig, axs = plt.subplots(rows, cols, figsize=figsize)
axs = np.asarray(axs).flatten()
for idx, (k,v) in enumerate(imgtuples):
axs[idx].set_title(k)
axs[idx].axis('off')
axs[idx].imshow(v)
return fig
def animate_progress(g, shape, figsize=None):
fig, ax = plt.subplots(figsize=figsize)
img = ax.imshow(np.zeros(shape, np.float32))
ax.set_axis_off()
def updateimg(x):
img.set_data(x)
return img,
return animation.FuncAnimation(fig, updateimg, frames=g, interval=100, blit=False)
def show_progress_ipython(g, shape, figsize=None):
from IPython.display import display, clear_output
fig, ax = plt.subplots(figsize=figsize)
img = ax.imshow(np.zeros(shape, np.float32))
ax.set_axis_off()
for x in g:
clear_output(wait=True)
img.set_data(x)
display(fig)
clear_output(wait=True) | 27.609756 | 86 | 0.662544 |
a4b82ed9882df3b715a284b0fdf967a5516a4db1 | 4,801 | py | Python | tests/dpaycli/test_witness.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | tests/dpaycli/test_witness.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | tests/dpaycli/test_witness.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from dpaycli import DPay
from dpaycli.witness import Witness, Witnesses, WitnessesVotedByAccount, WitnessesRankedByVote
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
    """Witness-related dpaycli operations, run against unsigned,
    no-broadcast DPay clients so nothing is actually submitted."""

    @classmethod
    def setUpClass(cls):
        """Create shared 'normal' and 'testnet' DPay instances with one key."""
        nodelist = NodeList()
        nodelist.update_nodes(dpay_instance=DPay(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
        cls.bts = DPay(
            node=nodelist.get_nodes(),
            nobroadcast=True,
            unsigned=True,
            keys={"active": wif},
            num_retries=10
        )
        cls.testnet = DPay(
            # node="https://testnet.timcliff.com",
            node=nodelist.get_nodes(),
            nobroadcast=True,
            unsigned=True,
            keys={"active": wif},
            num_retries=10
        )

        # from getpass import getpass
        # self.bts.wallet.unlock(getpass())
        set_shared_dpay_instance(cls.bts)
        cls.bts.set_default_account("test")

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_feed_publish(self, node_param):
        """feed_publish() builds a 'feed_publish' op naming the publisher."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        bts.txbuffer.clear()
        w = Witness("gtg", dpay_instance=bts)
        tx = w.feed_publish("4 BBD", "1 BEX")
        self.assertEqual(
            (tx["operations"][0][0]),
            "feed_publish"
        )
        op = tx["operations"][0][1]
        self.assertIn(
            "gtg",
            op["publisher"])

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_update(self, node_param):
        """update() builds a 'witness_update' op carrying the new props."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        bts.txbuffer.clear()
        w = Witness("gtg", dpay_instance=bts)
        props = {"account_creation_fee": "0.1 BEX",
                 "maximum_block_size": 32000,
                 "bbd_interest_rate": 0}
        tx = w.update(wif, "", props)
        self.assertEqual((tx["operations"][0][0]), "witness_update")
        op = tx["operations"][0][1]
        self.assertIn(
            "gtg",
            op["owner"])

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_witnesses(self, node_param):
        """Witnesses() lists a non-empty collection of Witness objects."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = Witnesses(dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_WitnessesVotedByAccount(self, node_param):
        """Witnesses voted for by an account are returned as Witness objects."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = WitnessesVotedByAccount("gtg", dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_WitnessesRankedByVote(self, node_param):
        """Vote-ranked witness listing is non-empty and typed correctly."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = WitnessesRankedByVote(dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_export(self, node_param):
        """Witness.json() mirrors the raw witness data fetched from the node,
        except for a few constantly-changing fields."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        owner = "gtg"
        # Fetch the reference data through whichever RPC flavour is in use.
        if bts.rpc.get_use_appbase():
            witness = bts.rpc.find_witnesses({'owners': [owner]}, api="database")['witnesses']
            if len(witness) > 0:
                witness = witness[0]
        else:
            witness = bts.rpc.get_witness_by_account(owner)
        w = Witness(owner, dpay_instance=bts)
        keys = list(witness.keys())
        json_witness = w.json()
        # These fields change between the two fetches, so skip them.
        exclude_list = ['votes', 'virtual_last_update', 'virtual_scheduled_time']
        for k in keys:
            if k not in exclude_list:
                if isinstance(witness[k], dict) and isinstance(json_witness[k], list):
                    self.assertEqual(list(witness[k].values()), json_witness[k])
                else:
                    self.assertEqual(witness[k], json_witness[k])
ace8fb2e069fb1ebaae41c69a50dff82e7a3307c | 1,689 | py | Python | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from pymongo import MongoClient
#####
debug_mode = True # Flase - production, True - staging
#####
#
enable_proxy = True
http_proxy = 'http://192.168.2.108:8888'
https_proxy = 'https://192.168.2.108:8888'
proxy_list = ['192.168.2.103']
enable_local_test = True
#####
db_serv_list='127.0.0.1'
# db_serv_list='mongodb://10.168.11.151:27017,10.252.95.145:27017,10.252.171.8:27017/?replicaSet=rs0'
cli = {
'web' : MongoClient(db_serv_list),
}
# MongoClient('10.168.11.151', replicaset='rs0', readPreference='secondaryPreferred') # 使用secondary 读
# MongoClient('mongodb://10.168.11.151:27017,10.252.95.145:27017,10.252.171.8:27017/?replicaSet=rs0')
db_web = cli['web']['trouble_db']
db_web.authenticate('ipcam','ipcam')
db_primary = db_web
thread_num = 1
auth_user = ['test','gt']
cs_admin = ['cs0']
tmp_path = '/usr/local/nginx/html/xah/static/tmp'
logs_path = '/usr/local/nginx/logs'
image_store_path = '/usr/share/nginx/html/xah/static/upload'
app_host='xah-wx.xmu.edu.cn'
wx_host='xah-wx.xmu.edu.cn'
image_host='xah-wx.xmu.edu.cn/static'
notify_host='xah-wx.xmu.edu.cn'
app_pool=['xah-wx.xmu.edu.cn']
WX_store = {
'000' : { # 测试
'wx_appid' : '',
'wx_appsecret' : '9e9ee82b0f6083311ec1c51e726dedf0',
'mch_id' : '1408035102',
},
}
# 微信设置
region_id = '000'
wx_setting = WX_store[region_id]
order_fuffix=''
http_port=8000
https_port=443
mail_server='127.0.0.1'
sender='"jack139"<jack139@gmail.com>'
worker=['jack139@gmail.com']
web.config.debug = debug_mode
config = web.storage(
email = 'jack139@gmail.com',
site_name = 'ipcam',
site_des = '',
static = '/static'
)
| 22.223684 | 101 | 0.677324 |
935a5396b9ab1626006272f41440ff5f9882ca96 | 14,631 | py | Python | tests/cli/test_run.py | mkp6781/zulip-terminal | c1977a4058277772a3abb96bb76631d69f3ce41c | [
"Apache-2.0"
] | null | null | null | tests/cli/test_run.py | mkp6781/zulip-terminal | c1977a4058277772a3abb96bb76631d69f3ce41c | [
"Apache-2.0"
] | null | null | null | tests/cli/test_run.py | mkp6781/zulip-terminal | c1977a4058277772a3abb96bb76631d69f3ce41c | [
"Apache-2.0"
] | null | null | null | import builtins
import os
import stat
import pytest
from zulipterminal.cli.run import (
THEMES,
_write_zuliprc,
exit_with_error,
get_login_id,
in_color,
main,
parse_args,
)
from zulipterminal.model import ServerConnectionFailure
from zulipterminal.version import ZT_VERSION
@pytest.mark.parametrize(
    "color, code",
    [
        ("red", "\x1b[91m"),
        ("green", "\x1b[92m"),
        ("yellow", "\x1b[93m"),
        ("blue", "\x1b[94m"),
        ("purple", "\x1b[95m"),
        ("cyan", "\x1b[96m"),
    ],
)
def test_in_color(color, code, text="some text"):
    """in_color() wraps the text in the color's ANSI code plus a reset."""
    expected = "{}{}\x1b[0m".format(code, text)
    assert in_color(color, text) == expected
@pytest.mark.parametrize(
    "json, label",
    [
        (
            dict(require_email_format_usernames=False, email_auth_enabled=True),
            "Email or Username",
        ),
        (
            dict(require_email_format_usernames=False, email_auth_enabled=False),
            "Username",
        ),
        (dict(require_email_format_usernames=True, email_auth_enabled=True), "Email"),
        (dict(require_email_format_usernames=True, email_auth_enabled=False), "Email"),
    ],
)
def test_get_login_id(mocker, json, label):
    """get_login_id() prompts with a label derived from server settings."""
    response = mocker.Mock(json=lambda: json)
    mocked_get = mocker.patch("requests.get", return_value=response)
    mocked_styled_input = mocker.patch(
        "zulipterminal.cli.run.styled_input", return_value="input return value"
    )

    result = get_login_id("REALM_URL")

    assert result == "input return value"
    mocked_get.assert_called_with(url="REALM_URL/api/v1/server_settings")
    mocked_styled_input.assert_called_with(label + ": ")
@pytest.mark.parametrize("options", ["-h", "--help"])
def test_main_help(capsys, options):
    """Both help flags exit and print a usage message listing every option."""
    with pytest.raises(SystemExit):
        main([options])

    captured = capsys.readouterr()

    lines = captured.out.strip().split("\n")

    assert lines[0].startswith("usage: ")

    required_arguments = {
        "--theme THEME, -t THEME",
        "-h, --help",
        "-d, --debug",
        "--list-themes",
        "--profile",
        "--config-file CONFIG_FILE, -c CONFIG_FILE",
        "--autohide",
        "--no-autohide",
        "-v, --version",
        "-e, --explore",
        "--color-depth",
        "--notify",
        "--no-notify",
    }
    # Lines that introduce an optional argument start with '-' at column 2.
    optional_argument_lines = {
        line[2:] for line in lines if len(line) > 2 and line[2] == "-"
    }
    for line in optional_argument_lines:
        assert any(line.startswith(arg) for arg in required_arguments)

    assert captured.err == ""
@pytest.fixture
def minimal_zuliprc(tmpdir):
    """Write a minimal zuliprc with safe (0600) permissions; return its path."""
    zuliprc_path = str(tmpdir) + "/zuliprc"
    with open(zuliprc_path, "w") as f:
        f.write("[api]")  # minimal to avoid Exception
    os.chmod(zuliprc_path, 0o600)
    return zuliprc_path
def test_valid_zuliprc_but_no_connection(
    capsys, mocker, minimal_zuliprc, server_connection_error="some_error"
):
    """A readable zuliprc plus a failing server exits 1 with an error line."""
    mocker.patch(
        "zulipterminal.core.Controller.__init__",
        side_effect=ServerConnectionFailure(server_connection_error),
    )

    with pytest.raises(SystemExit) as e:
        main(["-c", minimal_zuliprc])
    assert str(e.value) == "1"

    captured = capsys.readouterr()
    lines = captured.out.strip().split("\n")
    expected_lines = [
        "Loading with:",
        " theme 'zt_dark' specified with no config.",
        " autohide setting 'no_autohide' specified with no config.",
        " maximum footlinks value '3' specified with no config.",
        " color depth setting '256' specified with no config.",
        " notify setting 'disabled' specified with no config.",
        "\x1b[91m",
        f"Error connecting to Zulip server: {server_connection_error}.\x1b[0m",
    ]
    assert lines == expected_lines
    assert captured.err == ""
@pytest.mark.parametrize("bad_theme", ["c", "d"])
def test_warning_regarding_incomplete_theme(
    capsys,
    mocker,
    monkeypatch,
    minimal_zuliprc,
    bad_theme,
    server_connection_error="sce",
):
    """Selecting an incomplete theme prints a warning with alternatives."""
    mocker.patch(
        "zulipterminal.core.Controller.__init__",
        side_effect=ServerConnectionFailure(server_connection_error),
    )
    # Register the bad theme and fake the complete/incomplete split.
    monkeypatch.setitem(THEMES, bad_theme, [])
    mocker.patch("zulipterminal.cli.run.all_themes", return_value=("a", "b", "c", "d"))
    mocker.patch(
        "zulipterminal.cli.run.complete_and_incomplete_themes",
        return_value=(["a", "b"], ["c", "d"]),
    )

    with pytest.raises(SystemExit) as e:
        main(["-c", minimal_zuliprc, "-t", bad_theme])
    assert str(e.value) == "1"

    captured = capsys.readouterr()
    lines = captured.out.strip().split("\n")
    expected_lines = [
        "Loading with:",
        f" theme '{bad_theme}' specified on command line.",
        "\x1b[93m WARNING: Incomplete theme; results may vary!",
        f" (you could try: {'a'}, {'b'})\x1b[0m",
        " autohide setting 'no_autohide' specified with no config.",
        " maximum footlinks value '3' specified with no config.",
        " color depth setting '256' specified with no config.",
        " notify setting 'disabled' specified with no config.",
        "\x1b[91m",
        f"Error connecting to Zulip server: {server_connection_error}.\x1b[0m",
    ]
    assert lines == expected_lines
    assert captured.err == ""
@pytest.mark.parametrize("options", ["-v", "--version"])
def test_zt_version(capsys, options):
    """Version flags exit with code 0 after printing the version string."""
    with pytest.raises(SystemExit) as e:
        main([options])
    assert str(e.value) == "0"

    captured = capsys.readouterr()
    assert captured.out.strip("\n") == "Zulip Terminal " + ZT_VERSION
    assert captured.err == ""
@pytest.mark.parametrize(
    "option, autohide",
    [
        ("--autohide", "autohide"),
        ("--no-autohide", "no_autohide"),
        ("--debug", None),  # no-autohide by default
    ],
)
def test_parse_args_valid_autohide_option(option, autohide):
    """parse_args() maps each autohide CLI flag to its setting value."""
    args = parse_args([option])
    assert args.autohide == autohide
@pytest.mark.parametrize(
    "options", [["--autohide", "--no-autohide"], ["--no-autohide", "--autohide"]]
)
def test_main_multiple_autohide_options(capsys, options):
    """Passing both autohide flags is an argparse conflict (exit code 2)."""
    with pytest.raises(SystemExit) as e:
        main(options)
    assert str(e.value) == "2"

    captured = capsys.readouterr()
    lines = captured.err.strip("\n")
    # argparse prefixes the message with the program name ("pytest" here).
    lines = lines.split("pytest: ", 1)[1]
    expected = f"error: argument {options[1]}: not allowed with argument {options[0]}"
    assert lines == expected
@pytest.mark.parametrize(
    "option, notify_option",
    [
        ("--notify", "enabled"),
        ("--no-notify", "disabled"),
        ("--profile", None),  # disabled by default
    ],
)
def test__parse_args_valid_notify_option(option, notify_option):
    """parse_args() maps each notify CLI flag to its setting value."""
    args = parse_args([option])
    assert args.notify == notify_option
@pytest.mark.parametrize(
    "options",
    [
        ["--notify", "--no-notify"],
        ["--no-notify", "--notify"],
    ],
)
def test_main_multiple_notify_options(capsys, options):
    """Passing both notify flags is an argparse conflict (exit code 2)."""
    with pytest.raises(SystemExit) as e:
        main(options)
    assert str(e.value) == "2"

    captured = capsys.readouterr()
    lines = captured.err.strip("\n")
    # argparse prefixes the message with the program name ("pytest" here).
    lines = lines.split("pytest: ", 1)[1]
    expected = f"error: argument {options[1]}: not allowed with argument {options[0]}"
    assert lines == expected
# NOTE: Fixture is necessary to ensure unreadable dir is garbage-collected
# See pytest issue #7821
@pytest.fixture
def unreadable_dir(tmpdir):
    """Yield (tmpdir, unreadable subdir); restore permissions on teardown."""
    unreadable_dir = tmpdir.mkdir("unreadable")
    unreadable_dir.chmod(0)
    if os.access(str(unreadable_dir), os.R_OK):
        # Docker container or similar
        pytest.skip("Directory was still readable")

    yield tmpdir, unreadable_dir

    unreadable_dir.chmod(0o755)
@pytest.mark.parametrize(
    "path_to_use, expected_exception",
    [
        ("unreadable", "PermissionError"),
        ("goodnewhome", "FileNotFoundError"),
    ],
    ids=["valid_path_but_cannot_be_written_to", "path_does_not_exist"],
)
def test_main_cannot_write_zuliprc_given_good_credentials(
    monkeypatch,
    capsys,
    mocker,
    unreadable_dir,
    path_to_use,
    expected_exception,
):
    """If zuliprc cannot be written, the matching OS error is reported."""
    tmpdir, unusable_path = unreadable_dir

    # This is default base path to use
    zuliprc_path = os.path.join(str(tmpdir), path_to_use)
    monkeypatch.setenv("HOME", zuliprc_path)

    # Give some arbitrary input and fake that it's always valid
    mocker.patch.object(builtins, "input", lambda _: "text\n")
    response = mocker.Mock(json=lambda: dict(api_key=""), status_code=200)
    mocker.patch("zulipterminal.cli.run.get_api_key", return_value=(response, None))

    with pytest.raises(SystemExit):
        main([])

    captured = capsys.readouterr()
    lines = captured.out.strip().split("\n")

    expected_line = (
        "\x1b[91m"
        f"{expected_exception}: zuliprc could not be created "
        f"at {os.path.join(zuliprc_path, 'zuliprc')}"
        "\x1b[0m"
    )
    assert lines[-1] == expected_line
@pytest.fixture
def parameterized_zuliprc(tmpdir):
    """Return a factory writing a zuliprc with the given [zterm] settings."""
    def func(config):
        zuliprc_path = str(tmpdir) + "/zuliprc"
        with open(zuliprc_path, "w") as f:
            f.write("[api]\n\n")  # minimal to avoid Exception
            f.write("[zterm]\n")
            for key, value in config.items():
                f.write(f"{key}={value}\n")
        os.chmod(zuliprc_path, 0o600)
        return zuliprc_path

    return func
@pytest.mark.parametrize(
    "config_key, config_value, footlinks_output",
    [
        ("footlinks", "disabled", "'0' specified in zuliprc file from footlinks."),
        ("footlinks", "enabled", "'3' specified in zuliprc file from footlinks."),
        ("maximum-footlinks", "3", "'3' specified in zuliprc file."),
        ("maximum-footlinks", "0", "'0' specified in zuliprc file."),
    ],
    ids=[
        "footlinks_disabled",
        "footlinks_enabled",
        "maximum-footlinks_3",
        "maximum-footlinks_0",
    ],
)
def test_successful_main_function_with_config(
    capsys,
    mocker,
    parameterized_zuliprc,
    config_key,
    config_value,
    footlinks_output,
):
    """Valid zuliprc settings are echoed in the startup summary output."""
    config = {
        "theme": "default",
        "autohide": "autohide",
        "notify": "enabled",
        "color-depth": "256",
    }
    config[config_key] = config_value
    zuliprc = parameterized_zuliprc(config)
    # Stub out the controller so main() returns right after printing settings.
    mocker.patch("zulipterminal.core.Controller.__init__", return_value=None)
    mocker.patch("zulipterminal.core.Controller.main", return_value=None)

    with pytest.raises(SystemExit):
        main(["-c", zuliprc])

    captured = capsys.readouterr()
    lines = captured.out.strip().split("\n")
    expected_lines = [
        "Loading with:",
        " theme 'zt_dark' specified in zuliprc file (by alias 'default').",
        " autohide setting 'autohide' specified in zuliprc file.",
        f" maximum footlinks value {footlinks_output}",
        " color depth setting '256' specified in zuliprc file.",
        " notify setting 'enabled' specified in zuliprc file.",
    ]
    assert lines == expected_lines
@pytest.mark.parametrize(
    "zulip_config, error_message",
    [
        (
            {"footlinks": "enabled", "maximum-footlinks": "3"},
            "Footlinks property is not allowed alongside maximum-footlinks",
        ),
        (
            {"maximum-footlinks": "-3"},
            "Minimum value allowed for maximum-footlinks is 0",
        ),
    ],
)
def test_main_error_with_invalid_zuliprc_options(
    capsys,
    mocker,
    parameterized_zuliprc,
    zulip_config,
    error_message,
):
    """Conflicting or out-of-range footlinks settings exit with code 1."""
    zuliprc = parameterized_zuliprc(zulip_config)
    mocker.patch("zulipterminal.core.Controller.__init__", return_value=None)
    mocker.patch("zulipterminal.core.Controller.main", return_value=None)

    with pytest.raises(SystemExit) as e:
        main(["-c", zuliprc])
    assert str(e.value) == "1"

    captured = capsys.readouterr()
    lines = captured.out.strip()
    expected_lines = f"\033[91m{error_message}\033[0m"
    assert lines == expected_lines
@pytest.mark.parametrize(
    "error_code, helper_text",
    [
        (1, ""),
        (2, "helper"),
    ],
)
def test_exit_with_error(error_code, helper_text, capsys, error_message="some text"):
    """exit_with_error() prints the message (plus helper text) and exits."""
    with pytest.raises(SystemExit) as e:
        exit_with_error(
            error_message=error_message, helper_text=helper_text, error_code=error_code
        )
    assert str(e.value) == str(error_code)

    captured = capsys.readouterr()
    lines = captured.out.strip().split("\n")
    expected_line = f"\033[91m{error_message}\033[0m"
    assert lines[0] == expected_line
    if helper_text:
        assert lines[1] == helper_text
def test__write_zuliprc__success(tmpdir, id="id", key="key", url="url"):
    """_write_zuliprc() writes the [api] section and restricts permissions."""
    path = os.path.join(str(tmpdir), "zuliprc")

    error_message = _write_zuliprc(path, api_key=key, server_url=url, login_id=id)

    assert error_message == ""

    expected_contents = f"[api]\nemail={id}\nkey={key}\nsite={url}"
    with open(path) as f:
        assert f.read() == expected_contents

    # Only the owner may access the file (no group/other rwx bits).
    assert stat.filemode(os.stat(path).st_mode)[-6:] == 6 * "-"
def test__write_zuliprc__fail_file_exists(
    minimal_zuliprc, tmpdir, id="id", key="key", url="url"
):
    """_write_zuliprc() refuses to overwrite an existing zuliprc."""
    path = os.path.join(str(tmpdir), "zuliprc")

    error_message = _write_zuliprc(path, api_key=key, server_url=url, login_id=id)

    assert error_message == "zuliprc already exists at " + path
@pytest.mark.parametrize(
    "mode",
    [
        # Avoid reformatting to retain readability of grid of values
        # fmt:off
        0o77, 0o70, 0o07,
        0o66, 0o60, 0o06,
        0o55, 0o50, 0o05,
        0o44, 0o40, 0o04,
        0o33, 0o30, 0o03,
        0o22, 0o20, 0o02,
        0o11, 0o10, 0o01,
        # fmt:on
    ],
)
def test_show_error_if_loading_zuliprc_with_open_permissions(
    capsys, minimal_zuliprc, mode
):
    """Any group/other permission bit on zuliprc aborts startup with advice."""
    # Keep owner rw and add the offending group/other bits.
    mode += 0o600
    os.chmod(minimal_zuliprc, mode)
    current_mode = stat.filemode(os.stat(minimal_zuliprc).st_mode)

    with pytest.raises(SystemExit) as e:
        main(["-c", minimal_zuliprc])
    assert str(e.value) == "1"

    captured = capsys.readouterr()
    lines = captured.out.split("\n")[:-1]
    expected_last_lines = [
        f"(it currently has permissions '{current_mode}')",
        "This can often be achieved with a command such as:",
        f" chmod og-rwx {minimal_zuliprc}",
        "Consider regenerating the [api] part of your zuliprc to ensure "
        "your account is secure."
        "\x1b[0m",
    ]
    assert lines[-4:] == expected_last_lines
    assert captured.err == ""
76ba42d1732b02e04a6ed07c711fa8f84da86ec2 | 5,991 | py | Python | test/test_plugin_enigma2.py | caronc/pnotify | 4c57d76ee7d88b04b3ac2667f91d9e7062486415 | [
"MIT"
] | null | null | null | test/test_plugin_enigma2.py | caronc/pnotify | 4c57d76ee7d88b04b3ac2667f91d9e7062486415 | [
"MIT"
] | null | null | null | test/test_plugin_enigma2.py | caronc/pnotify | 4c57d76ee7d88b04b3ac2667f91d9e7062486415 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from apprise import plugins
import requests
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)

# Our Testing URLs
# Each entry pairs an Apprise URL with the expectations AppriseURLTester
# verifies: the plugin instance class, the mocked HTTP response body/code,
# the privacy URL, and whether the notification should succeed.
apprise_url_tests = (
    ('enigma2://:@/', {
        'instance': None,
    }),
    ('enigma2://', {
        'instance': None,
    }),
    ('enigma2s://', {
        'instance': None,
    }),
    ('enigma2://localhost', {
        'instance': plugins.NotifyEnigma2,
        # This will fail because we're also expecting a server acknowledgement
        'notify_response': False,
    }),
    ('enigma2://localhost', {
        'instance': plugins.NotifyEnigma2,
        # invalid JSON response
        'requests_response_text': '{',
        'notify_response': False,
    }),
    ('enigma2://localhost', {
        'instance': plugins.NotifyEnigma2,
        # False is returned
        'requests_response_text': {
            'result': False
        },
        'notify_response': False,
    }),
    ('enigma2://localhost', {
        'instance': plugins.NotifyEnigma2,
        # With the right content, this will succeed
        'requests_response_text': {
            'result': True
        }
    }),
    ('enigma2://user@localhost', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        }
    }),
    # Set timeout
    ('enigma2://user@localhost?timeout=-1', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        }
    }),
    # Set timeout
    ('enigma2://user@localhost?timeout=-1000', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        }
    }),
    # Set invalid timeout (defaults to a set value)
    ('enigma2://user@localhost?timeout=invalid', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        }
    }),
    ('enigma2://user:pass@localhost', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # Our expected url(privacy=True) startswith() response:
        'privacy_url': 'enigma2://user:****@localhost',
    }),
    ('enigma2://localhost:8080', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
    }),
    ('enigma2://user:pass@localhost:8080', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
    }),
    ('enigma2s://localhost', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
    }),
    ('enigma2s://user:pass@localhost', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # Our expected url(privacy=True) startswith() response:
        'privacy_url': 'enigma2s://user:****@localhost',
    }),
    ('enigma2s://localhost:8080/path/', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # Our expected url(privacy=True) startswith() response:
        'privacy_url': 'enigma2s://localhost:8080/path/',
    }),
    ('enigma2s://user:pass@localhost:8080', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
    }),
    ('enigma2://localhost:8080/path?+HeaderKey=HeaderValue', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
    }),
    ('enigma2://user:pass@localhost:8081', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # force a failure
        'response': False,
        'requests_response_code': requests.codes.internal_server_error,
    }),
    ('enigma2://user:pass@localhost:8082', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # throw a bizzare code forcing us to fail to look it up
        'response': False,
        'requests_response_code': 999,
    }),
    ('enigma2://user:pass@localhost:8083', {
        'instance': plugins.NotifyEnigma2,
        'requests_response_text': {
            'result': True
        },
        # Throws a series of connection and transfer exceptions when this flag
        # is set and tests that we gracfully handle them
        'test_requests_exceptions': True,
    }),
)
def test_plugin_enigma2_urls():
    """Exercise every NotifyEnigma2 Apprise URL scenario in the table above."""
    tester = AppriseURLTester(tests=apprise_url_tests)
    tester.run_all()
| 31.366492 | 79 | 0.602904 |
be6e5d3382d9858116246c092f791658fc96bf03 | 13,197 | py | Python | tools/nntool/utils/tabular.py | knmcguire/gap_sdk | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | [
"Apache-2.0"
] | null | null | null | tools/nntool/utils/tabular.py | knmcguire/gap_sdk | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | [
"Apache-2.0"
] | null | null | null | tools/nntool/utils/tabular.py | knmcguire/gap_sdk | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 GreenWaves Technologies
# All rights reserved.
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from abc import ABC, abstractmethod
import csv
from texttable import Texttable
import xlsxwriter as xl
# Accepted format-spec type codes: the single-char printf-like types plus the
# 't'-prefixed tabular-specific types.
ALLOWED_TYPES = set('nbox%fFegwWdDsSl') | {'t' + c for c in 'ieahgcts'}
class Fmt:
    """A parsed format specification of the shape
    ``[[fill]align][0][width][.precision][type]``.

    Parsing approach borrowed from https://github.com/r1chardj0n3s/parse.
    The parsed pieces are exposed as read-only properties and can be
    re-applied to a value via :meth:`apply`.
    """

    def __init__(self, fmt, extra_types=None):
        '''Pull apart the format [[fill]align][0][width][.precision][type]

        Args:
            fmt (str): format specification; may be empty.
            extra_types: optional container of additional accepted type codes.

        Raises:
            ValueError: if the trailing type code is not recognised.
        '''
        self._fill = self._align = ""
        # FIX: guard against an empty spec — `fmt[0]` used to raise
        # IndexError when fmt == "".
        if fmt and fmt[0] in '<>=^':
            self._align = fmt[0]
            fmt = fmt[1:]
        elif len(fmt) > 1 and fmt[1] in '<>=^':
            # Two-character prefix: an arbitrary fill char then the align.
            self._fill = fmt[0]
            self._align = fmt[1]
            fmt = fmt[2:]

        # Optional zero-padding flag.
        self._zero = False
        if fmt and fmt[0] == '0':
            self._zero = True
            fmt = fmt[1:]

        # Consume the digits that make up the minimum field width.
        self._width = ''
        while fmt:
            if not fmt[0].isdigit():
                break
            self._width += fmt[0]
            fmt = fmt[1:]

        self._precision = ''
        if fmt.startswith('.'):
            # Precision isn't needed but we need to capture it so that
            # the ValueError isn't raised.
            fmt = fmt[1:]  # drop the '.'
            while fmt:
                if not fmt[0].isdigit():
                    break
                self._precision += fmt[0]
                fmt = fmt[1:]

        # the rest is the type, if present
        if fmt and fmt not in ALLOWED_TYPES and \
                (extra_types is None or fmt not in extra_types):
            raise ValueError('format spec %r not recognised' % fmt)
        self._fmt = fmt

    @property
    def fill(self):
        """Fill character ('' when none was given)."""
        return self._fill

    @property
    def width(self):
        """Minimum field width as a digit string ('' when absent)."""
        return self._width

    @property
    def zero(self):
        """True when the zero-padding flag was present."""
        return self._zero

    @property
    def align(self):
        """Alignment character ('<', '>', '=', '^' or '')."""
        return self._align

    @property
    def fmt(self):
        """Trailing type code ('' when absent)."""
        return self._fmt

    def apply(self, elem):
        """Format *elem* with this specification via str.format."""
        return ("{:" + str(self) + "}").format(elem)

    def applyfn(self):
        """Return a formatter callable that leaves empty values untouched."""
        def format_fn(elem):
            # Empty cells pass through unformatted (keeps padding off blanks).
            if str(elem) == "":
                return elem
            return ("{:" + str(self) + "}").format(elem)
        return format_fn

    def __str__(self):
        # Reassemble the canonical spec string from the parsed pieces.
        if self._precision:
            return "{}{}{}{}.{}{}".format(
                self._fill, self._align, self._zero and '0' or '',
                self._width, self._precision, self._fmt)
        return "{}{}{}{}{}".format(
            self._fill, self._align, self._zero and '0' or '',
            self._width, self._fmt)

    def __eq__(self, other):
        # Value equality over all parsed attributes.
        return self.__dict__ == other.__dict__
class AbstractTabularColumn(ABC):
    """Common base for table columns and column groups."""

    def __init__(self, show=True):
        # Whether this column (or group) should be rendered.
        self._show = show

    @property
    @abstractmethod
    def is_container(self):
        """True when this node holds child columns (i.e. it is a group)."""
        pass

    @property
    @abstractmethod
    def contents(self):
        """List of the leaf columns this node contributes."""
        pass

    @property
    def show(self):
        return self._show

    @show.setter
    def show(self, val):
        self._show = val

    def __eq__(self, other):
        # Value equality across all attributes.
        return self.__dict__ == other.__dict__
class TabularColumnGroup(AbstractTabularColumn):
    """A header cell that spans several child columns (a column group)."""

    def __init__(self, name, columns=None, header_fmt="^", header_valign="c", show=True):
        super().__init__(show)
        self._name = name
        self._header_fmt = Fmt(header_fmt)
        self._header_valign = header_valign
        # Fresh list per instance; never share a mutable default.
        self._columns = [] if columns is None else columns

    @property
    def is_container(self):
        """Groups always contain other columns."""
        return True

    @property
    def contents(self):
        return self._columns

    @property
    def name(self):
        return self._name

    @property
    def header_fmt(self):
        return self._header_fmt

    @property
    def header_valign(self):
        return self._header_valign
class TabularColumn(AbstractTabularColumn):
    """A single (leaf) table column with cell and header formatting."""
    def __init__(self, name, fmt="<s", valign="c",
                 header_fmt="^", header_valign="c", show=True):
        super().__init__(show)
        self._name = name
        # Cell format / vertical alignment, and the same pair for the header.
        self._fmt = Fmt(fmt)
        self._valign = valign
        self._header_fmt = Fmt(header_fmt)
        self._header_valign = header_valign
    @property
    def is_container(self):
        # Leaf columns never contain other columns.
        return False
    @property
    def contents(self):
        # A leaf's contents is itself, so group and non-group header cells
        # can be flattened with the same iteration code.
        return [self]
    @property
    def name(self):
        return self._name
    @property
    def fmt(self):
        return self._fmt
    @property
    def valign(self):
        return self._valign
    @property
    def header_fmt(self):
        return self._header_fmt
    @property
    def header_valign(self):
        return self._header_valign
class TabularRenderer(ABC):
    """Abstract visitor that turns a Tabular into some output format."""
    @abstractmethod
    def render(self, table):
        """Render *table* (a Tabular); the destination is renderer-specific."""
        pass
class Tabular:
    """Row-oriented table model.

    Rows whose first element is an AbstractTabularColumn are header rows;
    all other rows are data rows (or empty spacer rows).  The model is
    rendered through a TabularRenderer via render().
    """

    def __init__(self):
        self._rows = []
        self._current_header = None
        self._current_header_len = 0  # number of leaf columns in the header

    def add_row(self, row):
        """Append *row*, validating it against the current header.

        A header row may replace the previous one only if it has the same
        number of leaf columns; data and spacer rows require a header to be
        defined first, and data rows must match its length.
        """
        if row:
            if isinstance(row[0], AbstractTabularColumn):
                new_header_len = sum(len(col.contents) for col in row)
                assert self._current_header is None or self._current_header_len == new_header_len, \
                    "new header must have the same length"
                self._current_header = row
                self._current_header_len = new_header_len
            else:
                assert self._current_header is not None, "no header defined"
                # Typo fixed: "missmatch" -> "mismatch".
                assert len(row) == self._current_header_len, "length mismatch"
        else:
            assert self._current_header is not None, "no header defined"
        self._rows.append(row)

    @property
    def rows(self):
        return self._rows

    def render(self, renderer):
        """Feed this table to *renderer* (visitor style)."""
        renderer.render(self)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class CSVRenderer(TabularRenderer):
    """Render a Tabular as CSV via csv.writer.

    Hidden columns keep their slot in the header line but render as an
    empty cell; data cells are still formatted and written for them.
    """

    def __init__(self, output, **kwargs):
        self._output = output     # file-like object csv.writer writes to
        self._fmtparams = kwargs  # passed straight through to csv.writer

    def render(self, table):
        writer = csv.writer(self._output, **self._fmtparams)
        current_header = None
        for row in table.rows:
            if row and isinstance(row[0], AbstractTabularColumn):
                current_header = row
                headers = []
                for header in current_header:
                    for header_elem in header.contents:
                        if header_elem.show:
                            # Newlines would break the single CSV header line.
                            headers.append(header_elem.header_fmt.apply(
                                header_elem.name.replace('\n', ' ').replace('\r', '')))
                        else:
                            headers.append("")
                writer.writerow(headers)
                continue
            if not row:
                writer.writerow([])
                continue
            orow = []
            row_elem_idx = 0
            for header in current_header:
                for header_elem in header.contents:
                    # BUG FIX: format with the leaf column's fmt, not the
                    # enclosing header cell's -- column groups have no .fmt
                    # attribute, so the old code raised AttributeError
                    # whenever the header contained a TabularColumnGroup.
                    orow.append(header_elem.fmt.apply(row[row_elem_idx]))
                    row_elem_idx += 1
            writer.writerow(orow)
def get_textable_align(fmt):
    """Map a Fmt alignment character to a Texttable column align code.

    '<' -> 'l', '>' -> 'r', '^' -> 'c'; anything else defaults to 'l'.
    """
    return {"<": 'l', ">": 'r', "^": 'c'}.get(fmt.align, 'l')
def get_textable_dtype(fmt):
    # Texttable accepts a callable as a column dtype; use the Fmt's
    # formatting function (empty strings pass through unformatted).
    return fmt.applyfn()
def get_textable_valign(valign):
    # The one-letter codes ('t'/'c'/'b') are used as-is by the Texttable
    # renderer, so this is a pass-through kept for symmetry with the
    # get_textable_align/get_textable_dtype converters.
    return valign
class TextTableRenderer(TabularRenderer):
    """Render a Tabular as plain text using texttable.Texttable."""

    def __init__(self, maxwidth=80, chars=None):
        self._maxwidth = maxwidth  # maximum table width passed to Texttable
        self._chars = chars        # optional border/decoration characters
        self._output = None        # filled by render(), read via get_output()

    def render(self, table):
        writer = Texttable(self._maxwidth)
        if self._chars:
            writer.set_chars(self._chars)
        header_row = None
        for row in table.rows:
            # NOTE: only the FIRST header row configures the Texttable; a
            # later header row would be emitted as an ordinary data row.
            if header_row is None and row and \
                    isinstance(row[0], AbstractTabularColumn):
                header_row = row
                titles, aligns, valigns, dtypes = [], [], [], []
                for column in header_row:
                    for leaf in column.contents:
                        titles.append(
                            leaf.header_fmt.apply(leaf.name) if leaf.show else "")
                        aligns.append(get_textable_align(leaf.fmt))
                        valigns.append(get_textable_valign(leaf.valign))
                        dtypes.append(get_textable_dtype(leaf.fmt))
                writer.header(titles)
                writer.set_cols_align(aligns)
                writer.set_cols_dtype(dtypes)
                writer.set_cols_valign(valigns)
                continue
            writer.add_row(row if row else [])
        self._output = writer.draw()

    def get_output(self):
        """The text produced by the last render() call (None before that)."""
        return self._output
def get_excel_align(fmt):
    """Map a Fmt alignment character to the xlsxwriter 'align' value.

    '<' -> 'left', '>' -> 'right', '^' -> 'center'; default is 'left'.
    """
    return {"<": 'left', ">": 'right', "^": 'center'}.get(fmt.align, 'left')
def get_excel_valign(valign):
    """Map the one-letter valign code to the xlsxwriter 'valign' value.

    't' -> 'top', 'b' -> 'vbottom', 'c' -> 'vcenter'; default is 'vcenter'.
    """
    return {"t": 'top', "b": 'vbottom', "c": 'vcenter'}.get(valign, 'vcenter')
def excel_format(work_book, fmt, valign, header=False):
    """Build an xlsxwriter cell format: bold for headers, aligned per *fmt*."""
    spec = {
        'bold': header,
        'align': get_excel_align(fmt),
        'valign': get_excel_valign(valign),
    }
    return work_book.add_format(spec)
class ExcelRenderer(TabularRenderer):
    """Render a Tabular to an .xlsx workbook (xlsxwriter, imported as xl)."""
    def __init__(self, filename):
        # Path of the workbook created (and closed) by render().
        self._filename = filename
    def render(self, table):
        """Write *table* to self._filename and close the workbook.

        Column groups become merged cells on a row of their own; column
        widths are grown to fit the longest header/cell text seen.
        """
        work_book = xl.Workbook(self._filename)
        work_sheet = work_book.add_worksheet()
        xls_row = 0
        # Per-column max text width, sized once from the first header row.
        widths = None
        for row in table.rows:
            # Max newline count among this row's cells; drives row height.
            row_nl = 0
            if row and isinstance(row[0], AbstractTabularColumn):
                if widths is None:
                    widths = [0] * sum([len(elem.contents) for elem in row])
                # First pass: emit merged cells for column groups only.
                col_idx = 0
                has_groups = False
                for header in row:
                    if isinstance(header, TabularColumnGroup):
                        has_groups = True
                        work_sheet.merge_range(\
                            xls_row, col_idx, xls_row, col_idx + len(header.contents) - 1,\
                            header.name, excel_format(work_book,
                                                      header.header_fmt,
                                                      header.header_valign,
                                                      True))
                        col_idx += len(header.contents)
                        nls = str(header.name).count("\n")
                        if nls > row_nl:
                            row_nl = nls
                    else:
                        # Plain column: the group row stays blank above it.
                        col_idx += 1
                if has_groups:
                    # The merged-group row occupied a worksheet row.
                    xls_row += 1
                # Second pass: leaf column headers; also collect the cell
                # format used later for each column's data cells.
                col_idx = 0
                formats = []
                for header in row:
                    for header_elem in header.contents:
                        if header_elem.show:
                            if len(header_elem.name) > widths[col_idx]:
                                widths[col_idx] = len(header_elem.name)
                            work_sheet.write(xls_row,
                                             col_idx,
                                             header_elem.name,
                                             excel_format(work_book,
                                                          header_elem.header_fmt,
                                                          header_elem.header_valign,
                                                          True))
                            nls = str(header_elem.name).count("\n")
                            if nls > row_nl:
                                row_nl = nls
                        # Data-cell format is collected even for hidden
                        # columns so formats[] aligns with column indices.
                        formats.append(excel_format(work_book,
                                                    header_elem.fmt,
                                                    header_elem.valign))
                        col_idx += 1
                if row_nl > 0:
                    # 15 per text line -- presumably the default row height
                    # in points; TODO confirm against xlsxwriter docs.
                    work_sheet.set_row(xls_row, 15 * (row_nl + 1))
                xls_row += 1
                continue
            if not row:
                # Empty spacer row: just skip a worksheet row.
                xls_row += 1
                continue
            # Data row: one write per cell with the header-derived format.
            # NOTE(review): assumes a header row was rendered first (widths
            # and formats are set there) -- Tabular.add_row enforces this.
            for row_elem_idx, row_elem in enumerate(row):
                if len(str(row_elem)) > widths[row_elem_idx]:
                    widths[row_elem_idx] = len(str(row_elem))
                work_sheet.write(xls_row, row_elem_idx, row_elem, formats[row_elem_idx])
                nls = str(row_elem).count("\n")
                if nls > row_nl:
                    row_nl = nls
            if row_nl > 0:
                work_sheet.set_row(xls_row, 15 * (row_nl + 1))
            xls_row += 1
        # Auto-size every column to the widest content observed.
        for col, width in enumerate(widths):
            work_sheet.set_column(col, col, width)
        work_book.close()
| 29.722973 | 99 | 0.507918 |
489449326f353defd6423c4a2e826914413f1d7d | 747 | py | Python | pycheck.io/ELEMENTARY/The Most Numbers.py | jz2010927/pycheck.io | 627898bed6fe712921f082c53a1858b8d80835a4 | [
"MIT"
] | null | null | null | pycheck.io/ELEMENTARY/The Most Numbers.py | jz2010927/pycheck.io | 627898bed6fe712921f082c53a1858b8d80835a4 | [
"MIT"
] | null | null | null | pycheck.io/ELEMENTARY/The Most Numbers.py | jz2010927/pycheck.io | 627898bed6fe712921f082c53a1858b8d80835a4 | [
"MIT"
] | null | null | null | def checkio(*args):
if len(args) > 0:
return max(args) - min(args)
return 0
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
def almost_equal(checked, correct, significant_digits):
precision = 0.1 ** significant_digits
return correct - precision < checked < correct + precision
assert almost_equal(checkio(1, 2, 3), 2, 3), "3-1=2"
assert almost_equal(checkio(5, -5), 10, 3), "5-(-5)=10"
assert almost_equal(checkio(10.2, -2.2, 0, 1.1, 0.5), 12.4, 3), "10.2-(-2.2)=12.4"
assert almost_equal(checkio(), 0, 3), "Empty"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!") | 41.5 | 87 | 0.62249 |
e95739dd978710cb70fb304875d3831171d06ef4 | 1,873 | py | Python | pipelines/optuna-pipeline/optuna-pipeline.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/optuna-pipeline/optuna-pipeline.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/optuna-pipeline/optuna-pipeline.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | import kfp
from kfp import dsl
import os
# Initialize component store
component_store = kfp.components.ComponentStore(
local_search_paths=["pipelines/optuna-pipeline", "components"])
# Create component factories
optuna_op = component_store.load_component("optuna-worker")
slack_notification_op = component_store.load_component("slack-notification")
sidecar = kfp.dsl.Sidecar(
name="cloudsqlproxy",
image="gcr.io/cloudsql-docker/gce-proxy:1.14",
command=[
"/cloud_sql_proxy",
f"-instances={os.environ.get('GCP_PROJECT')}:{os.environ.get('GCP_REGION')}:{os.environ.get('DB_NAME')}=tcp:3306",
],
)
# Define pipeline
@dsl.pipeline(
name="optuna pipeline",
description="optuna pipeline"
)
def pipeline(
pipeline_name: str = "optuna-pipeline",
bucket_name: str = "",
job_id: str = "{{JOB_ID}}",
n_trials: int = 100,
n_jobs: int = 5,
training_pipeline_name: str = "head-pose-pipeline",
dataset: str = "",
epochs: int = 5
):
with dsl.ExitHandler(
exit_op=slack_notification_op(
pipeline_name=pipeline_name,
job_id=job_id,
message="Status: {{workflow.status}}"
).add_node_selector_constraint("cloud.google.com/gke-nodepool", "main-pool")
):
optuna_op(
pipeline_name=pipeline_name,
bucket_name=bucket_name,
job_id=job_id,
n_trials=n_trials,
n_jobs=n_jobs,
training_pipeline_name=training_pipeline_name,
dataset=dataset,
epochs=epochs
).set_display_name("optuna-worker")\
.add_node_selector_constraint("cloud.google.com/gke-nodepool", "cpu-pool")\
.add_sidecar(sidecar)\
.set_retry(num_retries=2)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(pipeline, "optuna-pipeline.yaml")
| 30.704918 | 122 | 0.654031 |
d4e6f4dc6a7682a6fc8760af766aac0f6811ee07 | 3,214 | py | Python | tests/test_scripted_effects_unused.py | Pelmen323/Kaiserreich_Autotests | 3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de | [
"MIT"
] | null | null | null | tests/test_scripted_effects_unused.py | Pelmen323/Kaiserreich_Autotests | 3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de | [
"MIT"
] | null | null | null | tests/test_scripted_effects_unused.py | Pelmen323/Kaiserreich_Autotests | 3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de | [
"MIT"
] | null | null | null | ##########################
# Test script to check if there are scripted effects that are not used via "xxx = yes"
# By Pelmen, https://github.com/Pelmen323
##########################
import glob
import os
import re
from ..test_classes.generic_test_class import FileOpener, DataCleaner, ResultsReporter
FALSE_POSITIVES = [
'generate_generic_sics_and_activate',
'destroy_all_ships',
'decrease_state_category_by_one_level',
'gain_random_agency_upgrade',
'lec_american_fall',
'ott_create_abdulmecid',
'clear_sabotaged_resources_if_necesary',
'reduce_conscription_to_volunteer',
'reduce_conscription_to_disarmed',
'decrease_mobilisation',
'disband_fifty_percent_units',
'ant_setpr_leaders',
'cze_jiri_stribrny_sic',
'cze_alois_podhajsky_sic',
'cze_radola_gajda_sic',
'cze_rudolf_lodgman_sic',
'cze_moric_hruban_sic',
'cze_premysl_samal_sic',
'cze_karel_egnlis_sic',
'cze_rudolf_beran_sic',
'cze_rudolf_bechyne_sic',
'cze_zdenek_fierlinger_sic',
'cze_antonin_novotny_sic',
'cze_jaroslav_hasek_sic',
'cze_jan_sverma_sic',
'cze_bohumil_stasek_sic',
'cze_frantisek_hodac_sic',
'cze_emanuel_vajtauer_sic',
'create_snp_right',
'cub_remove_autentico_generals',
'fng_nupop',
'fng_zppop',
'gal_generals_join_ukraine',
'gal_characters_join_ukraine_immediate',
'clear_relations_with_prev',
'generate_generic_military_advisors_low_level',
'remove_civilian_advisor_roles',
'remove_military_advisor_roles',
'log_rp_eastern_military',
'log_aus_full_empire',
]
def test_check_scripted_effects_unused(test_runner: object):
filepath = test_runner.full_path_to_mod
filepath_to_effects = f'{test_runner.full_path_to_mod}common\\scripted_effects\\'
dict_with_scripted_effects = {}
paths = {}
# 1. Get the dict of all scripted effects
for filename in glob.iglob(filepath_to_effects + '**/*.txt', recursive=True):
text_file = FileOpener.open_text_file(filename)
text_file_splitted = text_file.split('\n')
for line in range(len(text_file_splitted)):
current_line = text_file_splitted[line-1]
pattern_matches = re.findall('^[a-zA-Z0-9_\\.]* = \\{', current_line)
if len(pattern_matches) > 0:
match = pattern_matches[0][:-4].strip()
dict_with_scripted_effects[match] = 0
paths[match] = os.path.basename(filename)
# 2. Find if scripted effects are used:
dict_with_scripted_effects = DataCleaner.clear_false_positives(input_iter=dict_with_scripted_effects, false_positives=FALSE_POSITIVES)
for filename in glob.iglob(filepath + '**/*.txt', recursive=True):
text_file = FileOpener.open_text_file(filename)
not_encountered_effects = [i for i in dict_with_scripted_effects.keys() if dict_with_scripted_effects[i] == 0]
if ' = yes' in text_file:
for key in not_encountered_effects:
if f'{key} = yes' in text_file:
dict_with_scripted_effects[key] += 1
results = [i for i in dict_with_scripted_effects.keys() if dict_with_scripted_effects[i] == 0]
ResultsReporter.report_results(results=results, paths=paths, message="Unused scripted effects were encountered. Check console output")
| 38.261905 | 138 | 0.734287 |
80307a635261fd66ddc3bcf72111c375dd60783b | 4,100 | py | Python | benchmark/startQiskit_noisy1248.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1248.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1248.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=43
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=40
prog.cz(input_qubit[3],input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=42
prog.z(input_qubit[3]) # number=33
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1248.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 31.538462 | 82 | 0.619512 |
ef00c6969d5c74e49c2d2be394d67d0efc61d7e9 | 5,734 | py | Python | backend-tests/tests/test_tenantadm.py | kacf/integration | d617eed3c1e8dbbddda716422c168ad1d0fad8e0 | [
"Apache-2.0"
] | null | null | null | backend-tests/tests/test_tenantadm.py | kacf/integration | d617eed3c1e8dbbddda716422c168ad1d0fad8e0 | [
"Apache-2.0"
] | 104 | 2020-07-06T06:03:41.000Z | 2022-02-04T08:26:31.000Z | backend-tests/tests/test_tenantadm.py | kacf/integration | d617eed3c1e8dbbddda716422c168ad1d0fad8e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import subprocess
import time
import uuid
from testutils.infra import cli
from testutils import api
from testutils.common import (
mongo,
mongo_cleanup,
)
logger = logging.getLogger(__name__)
@pytest.yield_fixture(scope="function")
def clean_mongo_tenant_migration(mongo):
mongo_cleanup(mongo)
tenant_cli = cli.CliTenantadm()
tenant_cli.migrate()
yield mongo
mongo_cleanup(mongo)
class TestCreateOrganizationCLIEnterprise:
api_mgmt_useradm = api.client.ApiClient(api.useradm.URL_MGMT)
logger = logger.getChild("TestCreateOrganizationCLIEnterprise")
def test_success(self, clean_mongo_tenant_migration):
"""
Create a single organization and verify that the created user
is able to log in.
"""
tenantadm_cli = cli.CliTenantadm()
self.logger.info("Starting `test_success`")
uuidv4 = str(uuid.uuid4())
name, username, password = (
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"secretsecret",
)
tenant_id = tenantadm_cli.create_org(
name=name, username=username, password=password
)
self.logger.debug("Tenant id: %s" % tenant_id)
# Retry login every second for 3 min
for i in range(60 * 3):
rsp = self.api_mgmt_useradm.call(
"POST", api.useradm.URL_LOGIN, auth=(username, password)
)
if rsp.status_code == 200:
break
time.sleep(1)
assert rsp.status_code == 200
self.logger.info("`test_success` finished successfully.")
def test_duplicate_username(self, clean_mongo_tenant_migration):
"""
Duplicate username (e-mail) should not be allowed, as this
leads to conflicting login credentials.
"""
tenantadm_cli = cli.CliTenantadm()
self.logger.debug("Starting `test_duplicate_username`")
self.logger.debug("First tenant creation call")
uuidv4 = str(uuid.uuid4())
name, username, password = (
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"secretsecret",
)
tenant_id = tenantadm_cli.create_org(
name=name, username=username, password=password
)
self.logger.debug("Tenant id: %s" % tenant_id)
# Retry login every second for 3 min
for i in range(60 * 3):
rsp = self.api_mgmt_useradm.call(
"POST", api.useradm.URL_LOGIN, auth=(username, password)
)
if rsp.status_code == 200:
self.logger.debug("Successfully logged into account")
break
time.sleep(1)
assert rsp.status_code == 200
try:
self.logger.debug("Second tenant creation call")
tenant_id = tenantadm_cli.create_org(
name=name, username=username, password="321password"
)
pytest.fail("Multiple users with the same username is not allowed")
except subprocess.CalledProcessError:
pass
self.logger.info("`test_duplicate_username` finished successfully.")
def test_duplicate_organization(self, clean_mongo_tenant_migration):
"""
It should be allowed to create duplicate organizations as long
as the user e-mails (login credentials) differ.
"""
self.logger.debug("Starting `test_duplicate_username`")
tenantadm_cli = cli.CliTenantadm()
uuidv4 = str(uuid.uuid4())
name, username, password = (
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"secretsecret",
)
tenant_id = tenantadm_cli.create_org(
name=name, username=username, password=password
)
self.logger.debug("Tenant id: %s" % tenant_id)
# Retry login every second for 3 min
for i in range(60 * 3):
rsp = self.api_mgmt_useradm.call(
"POST", api.useradm.URL_LOGIN, auth=(username, password)
)
if rsp.status_code == 200:
self.logger.debug("Successfully logged into account")
break
time.sleep(1)
assert rsp.status_code == 200
name, username, password = (
"test.mender.io-" + uuidv4,
"some.other.user+" + uuidv4 + "@example.com",
"secretsecret",
)
tenant_id = tenantadm_cli.create_org(
name=name, username=username, password=password
)
self.logger.debug("Tenant id: %s" % tenant_id)
# Retry login every second for 3 min
for i in range(60 * 3):
rsp = self.api_mgmt_useradm.call(
"POST", api.useradm.URL_LOGIN, auth=(username, password)
)
if rsp.status_code == 200:
break
time.sleep(1)
assert rsp.status_code == 200
self.logger.info("`test_duplicate_username` finished successfully.")
| 33.928994 | 79 | 0.608301 |
677a897e2f3b01bfe68441a295ef7fe8753393f1 | 2,935 | py | Python | website/users/forms.py | eli-lam/flask-website | c003b6daf4fc92ef773a1650397a4b08c365704e | [
"MIT"
] | null | null | null | website/users/forms.py | eli-lam/flask-website | c003b6daf4fc92ef773a1650397a4b08c365704e | [
"MIT"
] | null | null | null | website/users/forms.py | eli-lam/flask-website | c003b6daf4fc92ef773a1650397a4b08c365704e | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from website.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('There already exist an account with that email.')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
picture = FileField('Update profile picture', validators=[FileAllowed(['jpg', 'png', 'jpeg'])])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('There already exist an account with that email.')
class RequestResetForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
submit = SubmitField('Request password reset')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset password') | 43.80597 | 106 | 0.698807 |
e9496fe9ea5cf6c9bf89b1462e813e35037c611e | 702 | py | Python | autoflow/scripts/mactab.py | sh-biswas/autoflow | 4c3a5f6b837bb6d546c81f11ef777eb805230b7e | [
"MIT"
] | 7 | 2020-11-18T16:29:31.000Z | 2021-09-08T19:06:40.000Z | autoflow/scripts/mactab.py | sksuryan/autoflow | ac1924e616e2d093de29719292b2a0fbad375dbd | [
"MIT"
] | 8 | 2020-11-18T17:15:11.000Z | 2020-11-27T15:05:52.000Z | autoflow/scripts/mactab.py | sksuryan/autoflow | ac1924e616e2d093de29719292b2a0fbad375dbd | [
"MIT"
] | 5 | 2020-11-19T14:15:03.000Z | 2021-10-29T18:22:01.000Z | from subprocess import Popen, PIPE
import sys
import os
def osascript(scpt):
p = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(scpt.encode('utf-8'))
return stdout, stderr
def openTab():
script = f"""
tell application "System Events"
tell process "Terminal" to keystroke "t" using command down
end
tell application "Terminal"
activate
do script with command "cd {os.getcwd()}" in window 1
end tell
"""
stdout, stderr = osascript(script)
if stderr:
sys.stderr.write('Error in Applescript: {}\n'.format(stderr)) | 31.909091 | 75 | 0.586895 |
4a9e3d2f26a08b1a88f3eabe3716d0d175229947 | 1,190 | py | Python | nextcord/__init__.py | VincentRPS/nextcord-v3 | c34ac1adf9afad589703cc9ec904e39eb43303ac | [
"MIT"
] | null | null | null | nextcord/__init__.py | VincentRPS/nextcord-v3 | c34ac1adf9afad589703cc9ec904e39eb43303ac | [
"MIT"
] | null | null | null | nextcord/__init__.py | VincentRPS/nextcord-v3 | c34ac1adf9afad589703cc9ec904e39eb43303ac | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2021-present vcokltfre & tag-epic
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = "3.0.0a"
from .client.client import Client
| 47.6 | 77 | 0.77479 |
9a3bd538bea03740ef7ae809014fdeee7a5f4f58 | 10,740 | py | Python | rootfs/api/tests/test_limits.py | drycc/controller | 7e9f3695df29af77d99729dc0185863f0791b183 | [
"Apache-2.0"
] | null | null | null | rootfs/api/tests/test_limits.py | drycc/controller | 7e9f3695df29af77d99729dc0185863f0791b183 | [
"Apache-2.0"
] | 19 | 2020-07-30T06:31:29.000Z | 2022-03-14T07:33:44.000Z | rootfs/api/tests/test_limits.py | drycc/controller | 7e9f3695df29af77d99729dc0185863f0791b183 | [
"Apache-2.0"
] | 9 | 2020-07-30T02:50:12.000Z | 2020-12-11T06:44:19.000Z | import json
import requests_mock
from django.core.cache import cache
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from api.serializers import MEMLIMIT_MATCH
from api.serializers import CPUSHARE_MATCH
from api.tests import adapter, DryccTransactionTestCase
User = get_user_model()  # project user model (resolved via AUTH_USER_MODEL)
@requests_mock.Mocker(real_http=True, adapter=adapter)
class TestLimits(DryccTransactionTestCase):
    """Tests setting and updating config values"""
    # Loaded fixtures provide the "autotest" user referenced in setUp.
    fixtures = ['tests.json']
    def setUp(self):
        # Authenticate every client request as the fixture user "autotest".
        self.user = User.objects.get(username='autotest')
        self.token = Token.objects.get(user=self.user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
    def tearDown(self):
        # make sure every test has a clean slate for k8s mocking
        cache.clear()
    def test_memlimit_regex(self, mock_requests):
        """Tests the regex for unit format used by "drycc limits:set --memory=<limit>"."""
        # Valid: positive integer followed by a single G/M unit (case-insensitive).
        self.assertTrue(MEMLIMIT_MATCH.match("2G"))
        self.assertTrue(MEMLIMIT_MATCH.match("2M"))
        self.assertTrue(MEMLIMIT_MATCH.match("20G"))
        self.assertTrue(MEMLIMIT_MATCH.match("20M"))
        self.assertTrue(MEMLIMIT_MATCH.match("2g"))
        self.assertTrue(MEMLIMIT_MATCH.match("2m"))
        self.assertTrue(MEMLIMIT_MATCH.match("20g"))
        self.assertTrue(MEMLIMIT_MATCH.match("20m"))
        # Invalid: zero amounts, trailing garbage, missing unit, bare unit.
        self.assertFalse(MEMLIMIT_MATCH.match("0m"))
        self.assertFalse(MEMLIMIT_MATCH.match("20MK"))
        self.assertFalse(MEMLIMIT_MATCH.match("10"))
        self.assertFalse(MEMLIMIT_MATCH.match("20gK"))
        self.assertFalse(MEMLIMIT_MATCH.match("mb"))
        self.assertFalse(MEMLIMIT_MATCH.match("0"))
    def test_cpushare_regex(self, mock_requests):
        """Tests the regex for unit format used by "drycc limits:set --cpu=<limit>"."""
        # Valid: whole cores ("2") or millicores ("500m").
        self.assertTrue(CPUSHARE_MATCH.match("500m"))
        self.assertTrue(CPUSHARE_MATCH.match("2"))
        self.assertTrue(CPUSHARE_MATCH.match("12"))
        # Invalid: fractional cores, zero, trailing garbage, bare unit.
        self.assertFalse(CPUSHARE_MATCH.match("0.5"))
        self.assertFalse(CPUSHARE_MATCH.match(".123"))
        self.assertFalse(CPUSHARE_MATCH.match("1.123"))
        self.assertFalse(CPUSHARE_MATCH.match("0"))
        self.assertFalse(CPUSHARE_MATCH.match("20MK"))
        self.assertFalse(CPUSHARE_MATCH.match("20gK"))
        self.assertFalse(CPUSHARE_MATCH.match("m"))
        self.assertFalse(CPUSHARE_MATCH.match("."))
    def test_request_limit(self, mock_requests):
        """
        Test that limit is auto-created for a new app and that
        limits can be updated using a PATCH
        """
        app_id = self.create_app()
        url = '/v2/apps/{app_id}/config'.format(**locals())
        # check default limit
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        self.assertIn('memory', response.data)
        self.assertEqual(response.data['memory'], {})
        # regression test for https://github.com/drycc/drycc/issues/1563
        self.assertNotIn('"', response.data['memory'])
        # set an initial limit
        mem = {'web': '1G'}
        cpu = {'web': '1000m'}
        body = {'memory': json.dumps(mem), 'cpu': json.dumps(cpu)}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        limit1 = response.data
        # check memory limits
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        self.assertIn('memory', response.data)
        memory = response.data['memory']
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '1G')
        # check cpu limits
        self.assertIn('cpu', response.data)
        cpu = response.data['cpu']
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '1000m')
        # set an additional value
        body = {'memory': json.dumps({'worker': '512M'}), 'cpu': json.dumps({'worker': '2000m'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        limit2 = response.data
        # each config change produces a new config version (new uuid)
        self.assertNotEqual(limit1['uuid'], limit2['uuid'])
        memory = response.data['memory']
        self.assertIn('worker', memory)
        # NOTE(review): the POST above set worker memory to '512M' but this
        # asserts '2G' - confirm whether the API is expected to normalise or
        # override the requested value, or whether this assertion is stale.
        self.assertEqual(memory['worker'], '2G')
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '1G')
        cpu = response.data['cpu']
        self.assertIn('worker', cpu)
        self.assertEqual(cpu['worker'], '2000m')
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '1000m')
        # read the limit again
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        limit3 = response.data
        self.assertEqual(limit2, limit3)
        memory = response.data['memory']
        self.assertIn('worker', memory)
        self.assertEqual(memory['worker'], '2G')
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '1G')
        cpu = response.data['cpu']
        self.assertIn('worker', cpu)
        self.assertEqual(cpu['worker'], '2000m')
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '1000m')
        # regression test for https://github.com/drycc/drycc/issues/1613
        # ensure that config:set doesn't wipe out previous limits
        body = {'values': json.dumps({'NEW_URL2': 'http://localhost:8080/'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        self.assertIn('NEW_URL2', response.data['values'])
        # read the limit again
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        memory = response.data['memory']
        self.assertIn('worker', memory)
        self.assertEqual(memory['worker'], '2G')
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '1G')
        cpu = response.data['cpu']
        self.assertIn('worker', cpu)
        self.assertEqual(cpu['worker'], '2000m')
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '1000m')
        # add with requests/limits
        body = {'memory': json.dumps({'db': '1G'}), 'cpu': json.dumps({'db': '1000m'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        # read the limit again
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        memory = response.data['memory']
        self.assertIn('worker', memory)
        self.assertEqual(memory['worker'], '2G')
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '1G')
        self.assertIn('db', memory)
        self.assertEqual(memory['db'], '1G')
        cpu = response.data['cpu']
        self.assertIn('worker', cpu)
        self.assertEqual(cpu['worker'], '2000m')
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '1000m')
        self.assertIn('db', cpu)
        self.assertEqual(cpu['db'], '1000m')
        # replace one with requests/limits
        body = {'memory': json.dumps({'web': '3G'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        # read the limit again
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        memory = response.data['memory']
        self.assertIn('worker', memory)
        self.assertEqual(memory['worker'], '2G')
        self.assertIn('web', memory)
        self.assertEqual(memory['web'], '3G')
        self.assertIn('db', memory)
        self.assertEqual(memory['db'], '1G')
        # replace one with requests/limits
        body = {'cpu': json.dumps({'web': '3000m'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        # read the limit again
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, response.data)
        cpu = response.data['cpu']
        self.assertIn('worker', cpu)
        self.assertEqual(cpu['worker'], '2000m')
        self.assertIn('web', cpu)
        self.assertEqual(cpu['web'], '3000m')
        self.assertIn('db', cpu)
        self.assertEqual(cpu['db'], '1000m')
        # unset a value
        body = {
            'cpu': json.dumps({'worker': None}),
            'memory': json.dumps({'worker': None})
        }
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        limit4 = response.data
        self.assertNotEqual(limit3['uuid'], limit4['uuid'])
        self.assertNotIn('worker', json.dumps(response.data['memory']))
        # bad memory values
        mem = {'web': '1Z'}
        body = {'memory': json.dumps(mem)}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        # out of range
        body = {'memory': json.dumps({'web': '64M'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        body = {'memory': json.dumps({'web': '1024G'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        # invalid process type name (contains '&')
        mem = {'w3&b': '1G'}
        body = {'memory': json.dumps(mem)}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        # bad cpu values
        mem = {'web': '1G'}
        body = {'cpu': json.dumps(mem)}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        # out of range
        body = {'cpu': json.dumps({'web': '1000'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        body = {'cpu': json.dumps({'web': '125m'})}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 201, response.data)
        # invalid process type name (contains '&')
        mem = {'w3&b': '1G'}
        body = {'cpu': json.dumps(mem)}
        response = self.client.post(url, body)
        self.assertEqual(response.status_code, 400, response.data)
        # disallow put/patch/delete
        response = self.client.put(url)
        self.assertEqual(response.status_code, 405, response.data)
        response = self.client.patch(url)
        self.assertEqual(response.status_code, 405, response.data)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 405, response.data)
| 40.992366 | 97 | 0.621322 |
fd12cb8e998472f9ae1bd542d5db6e41ce2586ab | 3,560 | py | Python | implementation/tests/gcn-dgl.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | implementation/tests/gcn-dgl.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | implementation/tests/gcn-dgl.py | ZhuFanCheng/Thesis | eba9a7567a5c254acb2e78fdac0cda7dddabb327 | [
"MIT"
] | null | null | null | import dgl
import dgl.data
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv
import argparse
import dgl.nn
dataset = dgl.data.CoraGraphDataset()  # downloads and caches Cora on first run
g=dataset[0]  # the dataset holds a single citation graph
# class GCN(nn.Module):
# def __init__(self,
# g,
# in_feats,
# n_hidden,
# n_classes,
# n_layers,
# activation,
# dropout):
# super(GCN, self).__init__()
# self.g = g
# self.layers = nn.ModuleList()
# # input layer
# self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
# # hidden layers
# for i in range(n_layers - 1):
# self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
# # output layer
# self.layers.append(GraphConv(n_hidden, n_classes))
# self.dropout = nn.Dropout(p=dropout)
# def forward(self, features):
# h = features
# for i, layer in enumerate(self.layers):
# if i != 0:
# h = self.dropout(h)
# h = layer(self.g, h)
# return h
class GCN(nn.Module):
    """Two-layer graph convolutional network with ReLU after each layer."""
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.conv1 = dgl.nn.GraphConv(in_features, hidden_features)
        self.conv2 = dgl.nn.GraphConv(hidden_features, out_features)
    def forward(self, blocks, x):
        # One graph/block per convolution; apply ReLU to every layer output.
        for block, conv in zip(blocks, (self.conv1, self.conv2)):
            x = F.relu(conv(block, x))
        return x
in_feats = g  # NOTE(review): misnamed - holds the graph itself, not a feature count, and is unused below
# Create the model with given dimensions
model = GCN(in_features=g.ndata['feat'].shape[1], hidden_features=16, out_features=dataset.num_classes)
opt = torch.optim.Adam(model.parameters())  # module-level optimizer (train() below builds its own)
def train(g, model):
    """Train *model* on graph *g* for 200 epochs of full-graph descent.

    Fixes over the previous version: ``pred`` was referenced but never
    defined (NameError on the first epoch), the loss was computed on argmax
    class ids (non-differentiable) instead of logits, the dataloader was
    handed node *features* where node ids belong, gradients were stepped via
    the module-level ``opt`` instead of the local optimizer, and blocks were
    unconditionally moved to CUDA.

    :param g: DGLGraph with ``feat``/``label``/``*_mask`` node data
    :param model: two-layer GCN whose forward takes ``(blocks, features)``
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    best_val_acc = 0
    best_test_acc = 0
    features = g.ndata['feat']
    labels = g.ndata['label']
    train_mask = g.ndata['train_mask']
    val_mask = g.ndata['val_mask']
    test_mask = g.ndata['test_mask']
    for e in range(200):
        # The model applies one GraphConv per entry of ``blocks``; for
        # full-graph training simply pass the whole graph twice.
        logits = model([g, g], features)
        # Loss must come from raw logits so it is differentiable.
        loss = F.cross_entropy(logits[train_mask], labels[train_mask])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        pred = logits.argmax(1)
        train_acc = (pred[train_mask] == labels[train_mask]).float().mean()
        val_acc = (pred[val_mask] == labels[val_mask]).float().mean()
        test_acc = (pred[test_mask] == labels[test_mask]).float().mean()
        # Track test accuracy at the epoch with the best validation accuracy.
        if best_val_acc < val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
        if e % 5 == 0:
            print('In epoch {}, loss: {:.3f}, val acc: {:.3f} (best {:.3f}), test acc: {:.3f} (best {:.3f})'.format(
                e, loss, val_acc, best_val_acc, test_acc, best_test_acc))
if __name__ == '__main__':
    # Train on the module-level Cora graph and model.
    train(g,model)
| 32.363636 | 128 | 0.574719 |
a07fc9630d3112fba76addf4f67488ca2b90d324 | 1,637 | py | Python | Shorty/urls.py | Rodrigo-NH/Shorty | 1c29f69918de163caef0ae95f743fb81002e14a5 | [
"MIT"
] | null | null | null | Shorty/urls.py | Rodrigo-NH/Shorty | 1c29f69918de163caef0ae95f743fb81002e14a5 | [
"MIT"
] | null | null | null | Shorty/urls.py | Rodrigo-NH/Shorty | 1c29f69918de163caef0ae95f743fb81002e14a5 | [
"MIT"
] | null | null | null | """Shorty URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from authentication.views import loginPage, signup, logout, passwordChange
from URLHandler.views import dashboard, generate, home, deleteurl
from home_shorty.views import short_generate,home_shortener
from django.conf.urls import include
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', home),
    # authentication
    path('signup/', signup, name="signup"),
    path('loginPage/', loginPage, name="loginPage"),
    path('logout/', logout, name="logout"),
    path('passwordChange/', passwordChange, name="passwordChange"),
    # URL shortener dashboard and actions
    path('dashboard/', dashboard, name="dashboard"),
    path('url_shorten/',home_shortener,name="home_shortener"),
    path('generate/', generate, name="generate"),
    path('shorten/',short_generate,name="shorten"),
    path('deleteurl/', deleteurl, name="deleteurl"),
    # catch-all: treats the path segment as a short-code query
    path('<str:query>/', home, name="home"),
    path('qr_code/', include('qr_code.urls', namespace="qr_code")),
    path('api/',include('api.urls')),
]
| 41.974359 | 77 | 0.704948 |
75af42c16295b431df24199666393b412fa58474 | 5,346 | py | Python | Text_Classify/utils/tools.py | Serpentera/Machine-Learning | eb037188f415cec6d50f218dfef466dfb742a207 | [
"MIT"
] | null | null | null | Text_Classify/utils/tools.py | Serpentera/Machine-Learning | eb037188f415cec6d50f218dfef466dfb742a207 | [
"MIT"
] | null | null | null | Text_Classify/utils/tools.py | Serpentera/Machine-Learning | eb037188f415cec6d50f218dfef466dfb742a207 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : lianggq
# @Time : 2019/7/19 10:24
# @FileName: tools.py
import numpy as np
import jieba
jieba.load_userdict('../stopword/myjieba.txt')  # custom segmentation dictionary; path is relative to the working directory
from tensorflow import keras as kr
from article_categories.config.config import FLAGS
def read_data(filename):
    """Split a labelled data file into parallel label and content lists.

    Each line is expected to look like ``"<label>\\t<content>"``; only the
    first tab is significant.  Lines without a tab yield an empty content
    string instead of aborting the whole read as before.

    The previous version wrapped everything in a bare ``except`` that printed
    a message and implicitly returned ``None``, which made callers crash on
    tuple unpacking; errors now propagate.

    :param filename: path of the UTF-8 encoded data file
    :return: ``(label_list, data_list)``
    :raises OSError: if the file cannot be opened or read
    """
    label_list = []
    data_list = []
    with open(filename, 'r', encoding='utf-8') as r:
        for line in r:
            label, _, content = line.strip().partition('\t')
            label_list.append(label)
            data_list.append(content)
    return label_list, data_list
def build_stop_word(stopword_filename):
    """Load the stop-word list, one word per line.

    The previous version swallowed all errors with a bare ``except`` and
    implicitly returned ``None``; I/O errors now propagate to the caller.
    A trailing newline in the file produces a final empty-string entry,
    matching the original ``split('\\n')`` behaviour.

    :param stopword_filename: path of the UTF-8 encoded stop-word file
    :return: list of stop words (may include a trailing ``''``)
    :raises OSError: if the file cannot be opened or read
    """
    with open(stopword_filename, 'r', encoding='utf-8') as r:
        return r.read().split('\n')
def write_data_clean(labels, contents, label_filename, content_filename):
    """Tokenise contents with jieba, drop stop words, and write both files.

    :param labels: label list (one per content)
    :param contents: raw content list
    :param label_filename: output file, one label per line
    :param content_filename: output file, ``', '``-joined tokens per line
    """
    # Load the stop-word list (path is relative to the working directory).
    stop_list = build_stop_word('../stopword/stopword.txt')
    # Label file: one label per line.
    with open(label_filename, 'w+', encoding='utf-8') as w:
        for label in labels:
            w.write(label + '\n')
    # Content file: precise-mode segmentation, stop words and bare spaces
    # removed.
    with open(content_filename, 'w+', encoding='utf-8') as f:
        for content in contents:
            # The previous version compared with ``word is not ' '``
            # (identity), which is unreliable for strings; use ``!=``.
            tokens = [word for word in jieba.cut(content, cut_all=False)
                      if word not in stop_list and word != ' ']
            # ', '.join is equivalent to the old str(list) string surgery
            # but safe for tokens containing quotes or brackets.
            f.write(', '.join(tokens) + '\n')
    print('数据清洗完毕')
def build_vocab(data, vocab_file, vector_word_npz, vocab_size=100000):
    """Build the vocabulary file and the (word, count) frequency array.

    :param data: path of the cleaned corpus; each line is comma-separated words
    :param vocab_file: output path, one vocabulary word per line
    :param vector_word_npz: output path handed to ``np.save`` (note that
        ``np.save`` appends ``.npy`` when the name lacks an extension)
    :param vocab_size: keep only the ``vocab_size`` most frequent words
    """
    from collections import Counter
    word_count = Counter()
    with open(data, 'r', encoding='utf-8') as r:
        for line in r:
            word_count.update(word.strip() for word in line.split(','))
    # most_common() is stable for ties (first-seen order), matching the old
    # sorted(..., key=count, reverse=True) behaviour.
    top_words = word_count.most_common(vocab_size)
    with open(vocab_file, 'w+', encoding='utf-8') as w:
        for word, _count in top_words:
            w.write(word + '\n')
    # Persist (word, count) pairs for later lookup.
    np.save(vector_word_npz, np.array(top_words))
def buile_onehot(vocab_filename, clean_label, clean_content):
    """Encode the cleaned corpus as padded id sequences and one-hot labels.

    :param vocab_filename: vocabulary file, one word per line
    :param clean_label: cleaned label file, one label per line
    :param clean_content: cleaned content file, comma-separated tokens
    :return: ``(x_pad, y_pad)`` - padded word-id matrix and one-hot labels
    :raises ValueError: if label and content files differ in line count
    """
    vocab, vocab_id = bulid_vocab_id(vocab_filename)   # {word: index}
    label_to_id = build_class_id(clean_label)          # {label: index}
    content_list = read_clean_content(clean_content)
    label_list = read_clean_label(clean_label)
    if len(content_list) != len(label_list):
        raise ValueError('line number was different with write source and target in files')
    words_to_id = []
    labels_to_id = []
    for content, label in zip(content_list, label_list):
        # Tokens missing from the vocabulary are silently dropped.
        ids = [vocab_id[word.strip()] for word in content.split(',') if word.strip() in vocab_id]
        words_to_id.append(ids)
        labels_to_id.append(label_to_id[label.strip('\n')])
    # 使用keras模块提供的pad_sequence来将文本pad为固定长度
    # (use keras pad_sequences to pad every text to a fixed length)
    x_pad = kr.preprocessing.sequence.pad_sequences(words_to_id, maxlen=FLAGS.seq_length)
    y_pad = kr.utils.to_categorical(labels_to_id, num_classes=len(label_to_id))
    return x_pad, y_pad
def bulid_vocab_id(vocab_filename):
    """Map each vocabulary word to a stable integer id.

    Deduplicates while preserving first-occurrence order.  The previous
    ``list(set(...))`` made the ids depend on Python's per-process hash
    randomisation, so word ids changed between runs and any persisted
    model/vocabulary pairing silently broke.

    :param vocab_filename: vocabulary file, one word per line
    :return: ``(vocab, vocab_id)`` - ordered word list and ``{word: index}``
    """
    with open(vocab_filename, 'r', encoding='utf-8') as r:
        words = [line.strip() for line in r]
    vocab = list(dict.fromkeys(words))  # order-preserving de-duplication
    vocab_id = dict(zip(vocab, range(len(vocab))))
    return vocab, vocab_id
def build_class_id(label_filename):
    """Map each distinct label to an integer id in first-seen order.

    :param label_filename: label file, one label per line
    :return: ``{label: index}``
    """
    with open(label_filename, 'r', encoding='utf-8') as r:
        # dict.fromkeys de-duplicates while keeping first-occurrence order.
        ordered = dict.fromkeys(label.strip('\n') for label in r)
    return {label: index for index, label in enumerate(ordered)}
def read_clean_content(clean_content):
    """Return every line of the cleaned-content file (newlines kept)."""
    with open(clean_content, 'r', encoding='utf-8') as r:
        return list(r)
def read_clean_label(clean_label):
    """Return every line of the cleaned-label file (newlines kept)."""
    with open(clean_label, 'r', encoding='utf-8') as r:
        return list(r)
# 生成批次数据
def batch_iter(x, y, batch_size):
    """Yield shuffled ``(x, y)`` mini-batches covering the whole dataset.

    :param x: indexable array of samples (e.g. ``numpy`` array)
    :param y: indexable array of targets, same length as *x*
    :param batch_size: maximum samples per batch
    """
    order = np.random.permutation(np.arange(len(x)))
    shuffled_x = x[order]
    shuffled_y = y[order]
    total = len(x)
    # int(.../batch_size) + 1 reproduces the original batch count exactly,
    # including the single empty batch yielded for empty input.
    for batch in range(int((total - 1) / batch_size) + 1):
        start = batch * batch_size
        end = min(start + batch_size, total)
        yield shuffled_x[start:end], shuffled_y[start:end]
| 29.213115 | 119 | 0.620464 |
c85f39c3b2544295435a9dca47932e38bea5d2b3 | 908 | py | Python | zerver/webhooks/flock/view.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-03-19T00:52:48.000Z | 2020-03-19T00:52:48.000Z | zerver/webhooks/flock/view.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | null | null | null | zerver/webhooks/flock/view.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-07-06T11:43:28.000Z | 2020-07-06T11:43:28.000Z | # Webhooks for external integrations.
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, api_key_only_webhook_view, \
has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
CHECK_IS_REPLY = "in reply to"  # NOTE(review): defined but unused in this view - confirm whether it is still needed
@api_key_only_webhook_view('Flock')
@has_request_variables
def api_flock_webhook(request: HttpRequest, user_profile: UserProfile,
                      payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Forward an incoming Flock payload to the fixed notifications topic."""
    text = payload["text"]
    # Fall back to the "notification" field when "text" is empty.
    message_body = text if len(text) != 0 else payload["notification"]
    check_send_webhook_message(request, user_profile, 'Flock notifications',
                               "{}".format(message_body))
    return json_success()
| 30.266667 | 89 | 0.746696 |
56fa445307e90482a2f96bb18cfd6baffb50e967 | 2,478 | py | Python | 20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/net/filehandler.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 8 | 2020-03-17T21:12:18.000Z | 2021-12-12T15:55:54.000Z | 20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/net/filehandler.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 2 | 2021-07-19T06:18:43.000Z | 2022-02-10T12:17:58.000Z | 20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/net/filehandler.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | [
"MIT"
] | 25 | 2020-03-20T09:32:45.000Z | 2021-07-18T18:12:59.000Z | import os
from browser import help_functions
from utils import color
BUFFER_SIZE = 8 * 1024  # socket receive chunk size in bytes
class Filehandler:
    """Sends and receives files over the client's socket connection.

    Wire protocol as implemented here: a ``FILE <hash> <name> ...`` header
    line, a ``READY`` acknowledgement from the peer, the raw file bytes
    terminated by a ``b'\\EOF'`` marker (or ``b'\\INT'`` on error), then a
    ``FINISHED`` acknowledgement.
    """
    def __init__(self, unionpath, client):
        # unionpath: filesystem/metadata helper; client: socket wrapper.
        self.unionpath = unionpath
        self.client = client
    def send_file(self, hash):
        # Resolve the file's metadata from its hash.
        name, timestamp, type, location, hash, extension, fs_path, mount = self.unionpath.translate_from_hash(hash)
        # NOTE(review): spaces are encoded as dots for transport and decoded
        # back in get_file - lossy for names that already contain dots.
        name = name.replace(" ",".")
        filepath = os.path.join(location, hash)
        self.client.send("FILE {} {} {} {} {} {}".format(hash, name, timestamp, extension, fs_path, mount))
        response = self.client.get()
        if response == "READY":
            try:
                # Stream the file, then the EOF marker, then wait for the ack.
                with open(filepath, 'rb') as file:
                    while True:
                        bytes = file.read()
                        if not bytes:
                            break
                        self.client.send_bytes(bytes)
                self.client.send_bytes(b'\EOF')
                response = self.client.get()
                if response == "FINISHED":
                    return True
            except:
                # Abort marker so the receiver stops waiting for data.
                self.client.send_bytes(b'\INT')
                return False
        else:
            return False
    def send_all_files_of_dir(self, dir):
        # Recursively send every file found under ``dir``.
        files = help_functions.get_all_files_from_dir(dir)
        for f in files:
            self.send_file(f)
    def get_file(self, message, dir=None, mount=None):
        """Receive one file announced by a ``FILE ...`` header message."""
        # NOTE(review): the unpack below overwrites the ``mount`` parameter
        # with the value from the message - confirm that is intended.
        cmd, hash, name, timestamp, extension, fs_path, mount, location = message.split()
        # Reverse the dot-for-space encoding applied by send_file.
        name = name.replace(".", " ")
        if fs_path == "root":
            fs_path = ""
        if extension == "None":
            extension = ""
        if mount == "None" or not mount:
            mount = ""
        self.unionpath.add_to_dictionary(hash, name, type="file",location=location, fs_path=fs_path, extension=extension, mount=mount,timestamp=timestamp)
        self.client.send("READY")
        # Files are stored under their hash, optionally inside ``dir``.
        if dir:
            location = os.path.join(self.unionpath.filesystem_root_dir, dir)
            location = os.path.join(location, hash)
        else:
            location = os.path.join(self.unionpath.filesystem_root_dir, hash)
        with open(location, "wb") as file:
            while True:
                bytes = self.client.server_socket.recv(BUFFER_SIZE)
                # NOTE(review): the chunk containing the EOF marker is written
                # to disk before the loop breaks, so the marker bytes end up
                # in the stored file - confirm whether that is acceptable.
                file.write(bytes)
                if bytes.strip()[-3:] == b'EOF':
                    break
            file.close()
        self.client.send("FINISHED")
        return True
616f2507fecc36d9561eea5a6bbb22cedfe9c704 | 418 | py | Python | corehq/sql_accessors/migrations/0048_livequery_sql.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/sql_accessors/migrations/0048_livequery_sql.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/sql_accessors/migrations/0048_livequery_sql.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | # Generated by Django 1.10.7 on 2017-07-06 21:18
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
# Helper that loads raw SQL templates from corehq/sql_accessors/sql_templates.
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {})
class Migration(migrations.Migration):
    """Installs the get_related_indices SQL function."""
    dependencies = [
        ('sql_accessors', '0047_livequery_sql'),
    ]
    operations = [
        migrator.get_migration('get_related_indices.sql'),
    ]
| 22 | 76 | 0.703349 |
7c7c15413ecac75c0a2b2b446a62f86a6ce3b951 | 3,956 | py | Python | models/dtree.py | oaao/decisiontreehugger | 95472693ee9326c1ff4f54c414622761dfb36b87 | [
"MIT"
] | null | null | null | models/dtree.py | oaao/decisiontreehugger | 95472693ee9326c1ff4f54c414622761dfb36b87 | [
"MIT"
] | null | null | null | models/dtree.py | oaao/decisiontreehugger | 95472693ee9326c1ff4f54c414622761dfb36b87 | [
"MIT"
] | null | null | null | import numpy as np
from .base import ProtoTree
class DecisionTree(ProtoTree):
    """Binary CART-style decision tree split on Gini impurity.

    Fixes over the previous version: ``_gini_impurity`` performed
    arithmetic on a list and normalised by the parent length;
    ``_information_gain`` referenced undefined names (``v``,
    ``self.impurity_rate``) and had a sign error in the weighted-impurity
    term; ``_best_split`` tested the wrong variable and returned two values
    where ``fit`` unpacks three; ``_best_split_per_feature`` treated a
    zero gain as "unset"; ``_validate`` called ``.intersection`` on an int
    and ``.cat`` on an int.
    """
    def __init__(self, max_depth=4, depth=1):
        self.L = None             # left child (feature value <= criteria)
        self.R = None             # right child (feature value > criteria)
        self.max_depth = max_depth
        self.depth = depth
        self.criteria = None      # threshold value chosen for the split
        self.split_feature = None # feature name chosen for the split
        self.info_gain = None     # gain achieved by the chosen split
        self.impurity = 1
    def _gini_impurity(self, data):
        """Gini impurity of a binary target Series (0 for an empty split)."""
        if data.empty:
            return 0
        # Normalise by the length of *this* split, not the parent's.
        p = data.value_counts(normalize=True).tolist()[0]
        # Binary classification: p*(1-p) is identical for both classes,
        # so the total impurity is simply double one of them.
        return 2 * p * (1 - p)
    def _information_gain(self, feature, value):
        """Impurity reduction from splitting *feature* at *value*."""
        left_mask = feature <= value
        right_mask = feature > value
        L_data, R_data = self.data[left_mask], self.data[right_mask]
        L_impurity = self._gini_impurity(L_data[self.target])
        R_impurity = self._gini_impurity(R_data[self.target])
        weighted = (len(L_data) / len(self.data)) * L_impurity \
            + (len(R_data) / len(self.data)) * R_impurity
        return self.impurity - weighted
    def _best_split_per_feature(self, attr):
        """Best (gain, threshold) for one feature; (None, None) if constant."""
        feature = self.data[attr]
        uniques = feature.unique()
        info_gain = None
        split = None
        if len(uniques) == 1:
            return info_gain, split
        for value in uniques:
            gain = self._information_gain(feature, value)
            # Compare against None explicitly: a gain of 0 is a valid value.
            if info_gain is None or gain > info_gain:
                info_gain = gain
                split = value
        return info_gain, split
    def _best_split(self):
        """Best (threshold, feature, gain) across all independent features."""
        best = None
        for feature in self.independent:
            info_gain, split = self._best_split_per_feature(feature)
            if split is None:
                continue
            if best is None or best['gain'] < info_gain:
                best = {'split': split, 'feature': feature, 'gain': info_gain}
        if best is None:
            return None, None, None
        return best['split'], best['feature'], best['gain']
    def _branch(self):
        """Grow the left/right subtrees from the chosen split."""
        self.L = DecisionTree(max_depth=self.max_depth, depth=self.depth + 1)
        self.R = DecisionTree(max_depth=self.max_depth, depth=self.depth + 1)
        L_rows = self.data[self.data[self.split_feature] <= self.criteria]
        R_rows = self.data[self.data[self.split_feature] > self.criteria]
        self.L.fit(data=L_rows, target=self.target)
        self.R.fit(data=R_rows, target=self.target)
    def _validate(self):
        """Reject non-numeric features and non-binary targets."""
        non_numeric = self.data[self.independent].select_dtypes(
            include=['category', 'object', 'bool']
        ).columns.tolist()
        if set(self.independent) & set(non_numeric):
            raise RuntimeError('all data features must be numeric')
        self.data[self.target] = self.data[self.target].astype('category')
        if len(self.data[self.target].cat.categories) != 2:
            raise RuntimeError('binary implementation only: data features must have <= 2 cases each')
    def fit(self, data, target):
        """
        Derive and self-assign (training) data, target attribute, and independent attribute names.

        data: pandas.core.frame.DataFrame
        target: string
        """
        if self.depth <= self.max_depth:
            print(f'processing at depth: {self.depth}')
        self.data = data
        self.target = target
        self.independent = self._get_independent(data, target)
        if self.depth <= self.max_depth:
            # self._validate()  # kept opt-in, as before
            self.impurity = self._gini_impurity(self.data[self.target])
            self.criteria, self.split_feature, self.info_gain = self._best_split()
            if self.criteria is not None and self.info_gain > 0:
                self._branch()
            else:
                print('Branching ends; max depth has been reached')
| 28.257143 | 117 | 0.591507 |
cbb30a13b938aa6024c8f2367793389c61bc0852 | 5,622 | py | Python | GuiStylizedWidgets/coffee_widgets.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | GuiStylizedWidgets/coffee_widgets.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | GuiStylizedWidgets/coffee_widgets.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | from PyQt5 import QtCore, QtWidgets
# RGB triplets (as "R, G, B" strings) used inside the Qt style sheets below.
color_1 = "216, 218, 162"
color_2 = "134, 172, 176"
color_3 = "130, 113, 26"
color_4 = "232, 140, 90"
color_5 = "180, 212, 76"
color_6 = "196, 141, 195"
color_7 = "219, 189, 217"
back_rgb = color_1  # default widget background colour
class CoffeeButton(QtWidgets.QPushButton):
    """Push button with the rounded grey-bordered 'coffee' look."""
    _buttons_style_sheet = (
        "background: rgb(%s);"
        "font-size: 14px;"
        "border-radius: 8px;"
        "border: 2px groove grey;"
        "min-height: 1.3em;" % back_rgb
    )
    def __init__(self, title, parent=None):
        super().__init__(title, parent)
        self.setStyleSheet(self._buttons_style_sheet)
class CoffeeColoredButton(CoffeeButton):
    """Toggle button whose border colour marks the selected state."""
    def __init__(self, color, title, parent=None):
        CoffeeButton.__init__(self, title, parent)
        # change_color builds the selected-state style sheet for ``color``.
        self.change_color(color)
        self.clicked.connect(self._switch_state)
        self._button_selected = False
    def change_color(self, color):
        """Rebuild the selected-state sheet with a new border colour."""
        self._selected_buttons_style_sheet = (
            "background: rgb(%s);"
            "font-size: 14px;"
            "border-radius: 8px;"
            "border: 2px groove %s;"
            "min-height: 1.3em;" % (back_rgb, color)
        )
    def selected_On(self):
        """Show the coloured border and remember the selected state."""
        self.setStyleSheet(self._selected_buttons_style_sheet)
        self._button_selected = True
    def default_On(self):
        """Restore the default look and clear the selected state."""
        self.setStyleSheet(self._buttons_style_sheet)
        self._button_selected = False
    def check_state(self):
        """Return True while the button is in the selected state."""
        return self._button_selected
    def _switch_state(self):
        # Click handler: toggle between the two visual states.
        if self._button_selected:
            self.default_On()
        else:
            self.selected_On()
class CoffeeColoredRGBButton(CoffeeButton):
    """Toggle button whose selected border colour is an "R, G, B" string."""
    def __init__(self, color_rgb, title, parent=None):
        CoffeeButton.__init__(self, title, parent)
        self._selected_buttons_style_sheet = "background: rgb(%s);" \
                                             "font-size: 14px;" \
                                             "border-radius: 8px;" \
                                             "border: 2px groove rgb(%s);" \
                                             "min-height: 1.3em;" % (back_rgb, color_rgb)
        self.clicked.connect(self._switch_state)
        self._button_selected = False
    def change_color(self, color):
        """Rebuild the selected-state sheet with a new "R, G, B" colour.

        Fix: the previous version dropped the ``rgb(...)`` wrapper that
        ``__init__`` uses, producing an invalid CSS colour for the
        "R, G, B" strings this class is built around.
        """
        self._selected_buttons_style_sheet = "background: rgb(%s);" \
                                             "font-size: 14px;" \
                                             "border-radius: 8px;" \
                                             "border: 2px groove rgb(%s);" \
                                             "min-height: 1.3em;" % (back_rgb, color)
    def selected_On(self):
        """Show the coloured border and remember the selected state."""
        self.setStyleSheet(self._selected_buttons_style_sheet)
        self._button_selected = True
    def default_On(self):
        """Restore the default look and clear the selected state."""
        self.setStyleSheet(self._buttons_style_sheet)
        self._button_selected = False
    def check_state(self):
        """Return True while the button is in the selected state."""
        return self._button_selected
    def _switch_state(self):
        # Click handler: toggle between the two visual states.
        if self._button_selected:
            self.default_On()
        else:
            self.selected_On()
class CoffeeLineEdit(QtWidgets.QLineEdit):
    """Line edit with the rounded grey-bordered 'coffee' look."""
    _lines_style_sheet = (
        "border-radius: 8px;"
        "border: 2px groove grey;"
        "font-size: 14px;"
        "min-height: 1.3em;"
    )
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setStyleSheet(self._lines_style_sheet)
class CoffeeColoredLine(QtWidgets.QLineEdit):
    """Read-only line edit with a caller-chosen border colour."""
    def __init__(self, color, parent=None):
        super().__init__(parent)
        self._lines_style_sheet = (
            "border-radius: 8px;"
            "border: 2px groove %s;"
            "font-size: 14px;"
            "max-height: 1.3em;" % color
        )
        self.setStyleSheet(self._lines_style_sheet)
        self.setReadOnly(True)
class CoffeeFullColoredRGBLine(QtWidgets.QLineEdit):
    """Read-only line edit whose whole background is an "R, G, B" colour."""
    def __init__(self, rgb_string, parent=None):
        QtWidgets.QLineEdit.__init__(self, parent)
        self._lines_style_sheet = "background: rgb(%s);" \
                                  "border-radius: 9px;" \
                                  "font-size: 14px;" \
                                  "max-height: 1.0em;" % rgb_string
        self.setStyleSheet(self._lines_style_sheet)
        self.setReadOnly(True)
    def change_color(self, rgb_string):
        """Swap the background colour for a new "R, G, B" string.

        Fix: the previous version dropped the ``font-size: 14px;`` rule,
        so calling change_color silently shrank the widget's text.
        """
        self._lines_style_sheet = "background: rgb(%s);" \
                                  "border-radius: 9px;" \
                                  "font-size: 14px;" \
                                  "max-height: 1.0em;" % rgb_string
        self.setStyleSheet(self._lines_style_sheet)
class CoffeeListWidget(QtWidgets.QListWidget):
    """List widget with the rounded grey-bordered 'coffee' look."""
    _listwidget_style_sheet = (
        "border-radius: 8px;"
        "font-size: 14px;"
        "border: 2px groove grey;"
        "min-height: 1.3em;"
    )
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setStyleSheet(self._listwidget_style_sheet)
| 39.041667 | 89 | 0.524191 |
61167c042d116128b1c7cbe28ca5de9ef425cf7a | 68 | py | Python | src/__init__.py | Promeos/LADOT-Street-Sweeping-Transition-Pan | eb0d224a7ba910c4bf1db78b9fdb1365de0e6945 | [
"MIT"
] | 1 | 2021-02-05T16:05:02.000Z | 2021-02-05T16:05:02.000Z | src/__init__.py | Promeos/LADOT-Street-Sweeping-Transition-Plan | eb0d224a7ba910c4bf1db78b9fdb1365de0e6945 | [
"MIT"
] | null | null | null | src/__init__.py | Promeos/LADOT-Street-Sweeping-Transition-Plan | eb0d224a7ba910c4bf1db78b9fdb1365de0e6945 | [
"MIT"
] | null | null | null | from .acquire import *
from .prepare import *
from .explore import * | 22.666667 | 22 | 0.75 |
cd806c9e2e4c333b8a1e82d1522723012d692250 | 10,981 | py | Python | e2e_tests/helpers.py | QueensU-Cloud/AzureTRE | 8118bb9720785b64ba01dae0ce4558b1d40dee1c | [
"MIT"
] | null | null | null | e2e_tests/helpers.py | QueensU-Cloud/AzureTRE | 8118bb9720785b64ba01dae0ce4558b1d40dee1c | [
"MIT"
] | null | null | null | e2e_tests/helpers.py | QueensU-Cloud/AzureTRE | 8118bb9720785b64ba01dae0ce4558b1d40dee1c | [
"MIT"
] | null | null | null | import asyncio
from contextlib import asynccontextmanager
from httpx import AsyncClient
from starlette import status
import config
from resources import strings
class InstallFailedException(Exception):
    """Raised when a workspace deployment ends in a failed state.

    NOTE(review): not raised within this chunk - presumably used by the
    polling helper elsewhere in the test suite; confirm before removing.
    """
    pass
def read_workspace_id() -> str:
    """Return the workspace id persisted by ``write_workspace_id``."""
    with open('workspace_id.txt', 'r') as handle:
        workspace_id = handle.readline()
    return workspace_id
def write_workspace_id(workspace_id: str) -> None:
    """Persist *workspace_id* so later test stages can retrieve it."""
    with open('workspace_id.txt', 'w') as handle:
        handle.write(workspace_id)
def get_auth_header(token: str) -> dict:
    """Build a Bearer Authorization header dict for *token*."""
    return {'Authorization': 'Bearer {}'.format(token)}
@asynccontextmanager
async def get_template(template_name, token, verify):
    """Yield the raw GET response for a single workspace template."""
    url = f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACE_TEMPLATES}/{template_name}"
    async with AsyncClient(verify=verify) as client:
        yield await client.get(url, headers={'Authorization': f'Bearer {token}'})
async def install_done(client, workspace_id, headers) -> (bool, str, str):
    """Report whether the workspace install has reached a terminal state.

    Returns (done, status, message); done is True once the deployment is
    either deployed or failed.
    """
    current_status, message = await check_deployment(client, workspace_id, headers)
    terminal = current_status in [strings.RESOURCE_STATUS_DEPLOYED, strings.RESOURCE_STATUS_FAILED]
    return terminal, current_status, message
async def check_deployment(client, workspace_id, headers) -> (str, str):
    """Fetch the workspace and return its (deployment status, message).

    A 404 response is treated as "the workspace was deleted". Previously
    any other unexpected status code fell off the end of the function and
    implicitly returned None, which made callers crash with an opaque
    tuple-unpacking TypeError; now an explicit error is raised instead.
    """
    response = await client.get(
        f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}/{workspace_id}",
        headers=headers)
    if response.status_code == 200:
        deployment_status = response.json()["workspace"]["deployment"]["status"]
        message = response.json()["workspace"]["deployment"]["message"]
        return deployment_status, message
    elif response.status_code == 404:
        # Seems like the resource got deleted
        return strings.RESOURCE_STATUS_DELETED, "Workspace was deleted"
    raise ValueError(f"Unexpected response ({response.status_code}) when checking workspace {workspace_id}")
async def post_workspace_template(payload, token, verify) -> (str, bool):
    """Create a workspace from *payload* and wait for the install to finish.

    Returns the new workspace id together with a success flag.
    """
    async with AsyncClient(verify=verify) as api_client:
        auth = {'Authorization': f'Bearer {token}'}
        url = f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}"
        response = await api_client.post(url, headers=auth, json=payload)
        assert (response.status_code == status.HTTP_202_ACCEPTED), f"Request for workspace {payload['templateName']} creation failed"
        workspace_id = response.json()["workspaceId"]
        # Persist the id so later stages (disable/delete) can find it.
        write_workspace_id(workspace_id)
        try:
            await wait_for(install_done, api_client, workspace_id, auth, strings.RESOURCE_STATUS_FAILED)
            return workspace_id, True
        except Exception:
            return workspace_id, False
async def delete_done(client, workspace_id, headers):
    """Report whether the workspace deletion has reached a terminal state."""
    current_status, message = await check_deployment(client, workspace_id, headers)
    terminal = current_status in [strings.RESOURCE_STATUS_DELETED, strings.RESOURCE_STATUS_DELETING_FAILED]
    return terminal, current_status, message
async def wait_for(func, client, workspace_id, headers, failure_state):
    """Poll *func* once a minute until it reports a terminal state, then
    assert that the state is not *failure_state*."""
    finished, state, message = await func(client, workspace_id, headers)
    while not finished:
        await asyncio.sleep(60)
        finished, state, message = await func(client, workspace_id, headers)
    try:
        assert state != failure_state
    except Exception as err:
        print(f"Failed to deploy status message: {message}")
        print(err)
        raise
async def disable_workspace(token, verify) -> None:
    """PATCH the stored workspace to enabled=false and verify it took effect."""
    async with AsyncClient(verify=verify) as api_client:
        workspace_id = read_workspace_id()
        url = f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}/{workspace_id}"
        response = await api_client.patch(url, headers=get_auth_header(token), json={"enabled": "false"})
        enabled = response.json()["workspace"]["properties"]["enabled"]
        assert (enabled is False), "The workspace wasn't disabled"
async def disable_and_delete_workspace(workspace_id, install_status, token, verify):
    """Disable then delete a workspace and wait for the deletion to finish.

    Regardless of the deletion outcome, raises InstallFailedException
    afterwards when the original install did not succeed.
    """
    async with AsyncClient(verify=verify) as api_client:
        auth = get_auth_header(token)
        await disable_workspace(token, verify)
        await delete_workspace(token, verify)
        try:
            await wait_for(delete_done, api_client, workspace_id, auth, strings.RESOURCE_STATUS_DELETING_FAILED)
        finally:
            if not install_status:
                raise InstallFailedException("Install was not done successfully")
async def delete_workspace(token, verify) -> None:
    """DELETE the workspace whose id is stored on disk; assert it succeeded."""
    async with AsyncClient(verify=verify) as api_client:
        workspace_id = read_workspace_id()
        url = f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}/{workspace_id}"
        response = await api_client.delete(url, headers=get_auth_header(token))
        assert (response.status_code == status.HTTP_200_OK), "The workspace couldn't be deleted"
@asynccontextmanager
async def get_service_template(template_name, token, verify):
    """Yield the raw API response for a single workspace service template."""
    url = (f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com"
           f"{strings.API_WORKSPACE_SERVICE_TEMPLATES}/{template_name}")
    auth = {'Authorization': f'Bearer {token}'}
    async with AsyncClient(verify=verify) as api_client:
        yield await api_client.get(url, headers=auth)
async def install_service_done(client, workspace_id, workspace_service_id, headers):
    """Report whether the service install has reached a terminal state."""
    current_status, message = await check_service_deployment(client, workspace_id, workspace_service_id, headers)
    terminal = current_status in [strings.RESOURCE_STATUS_DEPLOYED, strings.RESOURCE_STATUS_FAILED]
    return terminal, current_status, message
async def delete_service_done(client, workspace_id, workspace_service_id, headers):
    """Report whether the service deletion has reached a terminal state."""
    current_status, message = await check_service_deployment(client, workspace_id, workspace_service_id, headers)
    terminal = current_status in [strings.RESOURCE_STATUS_DELETED, strings.RESOURCE_STATUS_DELETING_FAILED]
    return terminal, current_status, message
async def check_service_deployment(client, workspace_id, workspace_service_id, headers) -> (str, str):
    """Fetch a workspace service and return its (deployment status, message).

    A 404 response is treated as "the service was deleted". Previously any
    other unexpected status code fell off the end of the function and
    implicitly returned None, which made callers crash with an opaque
    tuple-unpacking TypeError; now an explicit error is raised instead.
    """
    response = await client.get(f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}/{workspace_id}/{strings.API_WORKSPACE_SERVICES}/{workspace_service_id}", headers=headers)
    if response.status_code == 200:
        deployment_status = response.json()["workspaceService"]["deployment"]["status"]
        message = response.json()["workspaceService"]["deployment"]["message"]
        return deployment_status, message
    elif response.status_code == 404:
        # Seems like the resource got deleted
        return strings.RESOURCE_STATUS_DELETED, "Workspace service was deleted"
    raise ValueError(f"Unexpected response ({response.status_code}) when checking workspace service {workspace_service_id}")
async def wait_for_service(func, client, workspace_id, workspace_service_id, headers, failure_state):
    """Poll *func* once a minute until it reports a terminal state, then
    assert that the state is not *failure_state*."""
    finished, state, message = await func(client, workspace_id, workspace_service_id, headers)
    while not finished:
        await asyncio.sleep(60)
        finished, state, message = await func(client, workspace_id, workspace_service_id, headers)
    try:
        assert state != failure_state
    except Exception as err:
        print(f"Failed to deploy status message: {message}")
        print(err)
        raise
async def post_workspace_service_template(workspace_id, payload, token, verify):
    """Create a workspace service under *workspace_id* and wait for install.

    Returns the new workspace service id together with a success flag.
    """
    async with AsyncClient(verify=verify) as api_client:
        auth = get_auth_header(token)
        url = f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com{strings.API_WORKSPACES}/{workspace_id}/{strings.API_WORKSPACE_SERVICES}"
        response = await api_client.post(url, headers=auth, json=payload)
        assert (response.status_code == status.HTTP_202_ACCEPTED), f"Request for workspace service {payload['templateName']} creation failed"
        workspace_service_id = response.json()["workspaceServiceId"]
        try:
            await wait_for_service(install_service_done, api_client, workspace_id, workspace_service_id, auth, strings.RESOURCE_STATUS_FAILED)
            return workspace_service_id, True
        except Exception:
            return workspace_service_id, False
async def disable_workspace_service(workspace_id, workspace_service_id, token, verify) -> None:
    """PATCH the workspace service to enabled=false and verify the change."""
    async with AsyncClient(verify=verify) as api_client:
        url = (f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com"
               f"{strings.API_WORKSPACES}/{workspace_id}/{strings.API_WORKSPACE_SERVICES}/{workspace_service_id}")
        response = await api_client.patch(url, headers=get_auth_header(token), json={"enabled": "false"})
        enabled = response.json()["workspaceService"]["properties"]["enabled"]
        assert (enabled is False), "The workspace service wasn't disabled"
async def delete_workspace_service(workspace_id, workspace_service_id, token, verify) -> None:
    """DELETE the given workspace service and assert the request succeeded."""
    async with AsyncClient(verify=verify) as api_client:
        url = (f"https://{config.TRE_ID}.{config.RESOURCE_LOCATION}.cloudapp.azure.com"
               f"{strings.API_WORKSPACES}/{workspace_id}/{strings.API_WORKSPACE_SERVICES}/{workspace_service_id}")
        response = await api_client.delete(url, headers=get_auth_header(token))
        assert (response.status_code == status.HTTP_200_OK), "The workspace service couldn't be deleted"
async def ping_guacamole_workspace_service(workspace_id, workspace_service_id, token, verify) -> None:
    """GET the deployed Guacamole endpoint to confirm it is reachable.

    The service hostname embeds the last four characters of both ids.
    """
    async with AsyncClient(verify=verify) as api_client:
        ws_suffix = workspace_id[-4:]
        svc_suffix = workspace_service_id[-4:]
        url = f"https://guacamole-{config.TRE_ID}-ws-{ws_suffix}-svc-{svc_suffix}.azurewebsites.net/guacamole"
        response = await api_client.get(url, headers={'x-access-token': f'{token}'}, timeout=300)
        assert (response.status_code == status.HTTP_200_OK), "Guacamole cannot be reached"
async def disable_and_delete_workspace_service(workspace_id, workspace_service_id, install_status, token, verify):
    """Disable then delete a workspace service and wait for the deletion.

    Regardless of the deletion outcome, raises InstallFailedException
    afterwards when the original install did not succeed.
    """
    async with AsyncClient(verify=verify) as api_client:
        auth = {'Authorization': f'Bearer {token}'}
        await disable_workspace_service(workspace_id, workspace_service_id, token, verify)
        await delete_workspace_service(workspace_id, workspace_service_id, token, verify)
        try:
            await wait_for_service(delete_service_done, api_client, workspace_id, workspace_service_id, auth, strings.RESOURCE_STATUS_DELETING_FAILED)
        finally:
            if not install_status:
                raise InstallFailedException("Install was not done successfully")
| 47.951965 | 252 | 0.740643 |
1b23149dea3b2b3be4ad662e7d41184141c292a0 | 7,375 | py | Python | aiida_gaussian/calculations/gaussian.py | kjappelbaum/aiida-gaussian | 6208a2a195b1decd292f6cc35c84f3999bad2c55 | [
"MIT"
] | null | null | null | aiida_gaussian/calculations/gaussian.py | kjappelbaum/aiida-gaussian | 6208a2a195b1decd292f6cc35c84f3999bad2c55 | [
"MIT"
] | null | null | null | aiida_gaussian/calculations/gaussian.py | kjappelbaum/aiida-gaussian | 6208a2a195b1decd292f6cc35c84f3999bad2c55 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Gaussian input plugin."""
from __future__ import absolute_import
import os
from shutil import copyfile, copytree
import six
from six.moves import map, range
from aiida.orm import Dict, FolderData, List, RemoteData, SinglefileData
from aiida.common import CalcInfo, CodeInfo, InputValidationError
# from aiida.cmdline.utils import echo
from aiida.engine import CalcJob
from aiida.plugins import DataFactory
import pymatgen as mg
import pymatgen.io.gaussian as mgaus
StructureData = DataFactory("structure")
class GaussianCalculation(CalcJob):
    """
    AiiDA calculation plugin wrapping Gaussian.

    The ``parameters`` keys mirror the arguments of pymatgen's
    ``GaussianInput`` (see ``_render_input_string_from_params``), e.g.::

        parameters = Dict(dict={
            'link0_parameters': {
                '%chk': 'aiida.chk',
                '%mem': '1024MB',
                '%nprocshared': '2',
            },
            'functional': 'PBE1PBE',
            'basis_set': '6-31g',
            'charge': 0,
            'multiplicity': 1,
            'route_parameters': {
                'scf': {'cdiis': None},
                'nosymm': None,
                'opt': 'tight',
            },
        })
    """
    # Defaults: file names used for Gaussian stdin/stdout and the parser
    # entry point selected when the caller does not override it.
    INPUT_FILE = "aiida.inp"
    OUTPUT_FILE = "aiida.out"
    DEFAULT_PARSER = "gaussian_base_parser"
    @classmethod
    def define(cls, spec):
        """Declare the inputs, outputs and exit codes of this calculation."""
        super(GaussianCalculation, cls).define(spec)
        # Input parameters
        spec.input(
            "structure",
            valid_type=StructureData,
            required=False,
            help="Input structure; will be converted to pymatgen object",
        )
        spec.input(
            "parameters", valid_type=Dict, required=True, help="Input parameters"
        )
        spec.input(
            "settings",
            valid_type=Dict,
            required=False,
            help="additional input parameters",
        )
        spec.input(
            "parent_calc_folder",
            valid_type=RemoteData,
            required=False,
            help="the folder of a completed gaussian calculation",
        )
        spec.input_namespace(
            "extra_link1_sections",
            valid_type=Dict,
            required=False,
            dynamic=True,
            help="parameters for extra link1 sections",
        )
        # Turn mpi off by default
        spec.input("metadata.options.withmpi", valid_type=bool, default=False)
        spec.input(
            "metadata.options.parser_name",
            valid_type=six.string_types,
            default=cls.DEFAULT_PARSER,
            non_db=True,
        )
        # Outputs
        spec.output(
            "output_parameters",
            valid_type=Dict,
            required=True,
            help="The result parameters of the calculation",
        )
        spec.output(
            "output_structure",
            valid_type=StructureData,
            required=False,
            help="Final optimized structure, if available",
        )
        spec.default_output_node = "output_parameters"
        spec.outputs.dynamic = True
        # Exit codes
        spec.exit_code(
            100,
            "ERROR_MISSING_OUTPUT_FILES",
            message="Calculation did not produce all expected output files.",
        )
    # --------------------------------------------------------------------------
    # pylint: disable = too-many-locals
    def prepare_for_submission(self, folder):
        """
        This is the routine to be called when you want to create
        the input files and related stuff with a plugin.

        :param folder: a aiida.common.folders.Folder subclass where
            the plugin should put all its files.
        :returns: an ``aiida.common.CalcInfo`` describing the job.
        """
        # create calc info
        calcinfo = CalcInfo()
        calcinfo.remote_copy_list = []
        calcinfo.local_copy_list = []
        # The main input
        try:
            input_string = GaussianCalculation._render_input_string_from_params(
                self.inputs.parameters.get_dict(), self.inputs.structure
            )
        # If structure is not specified the user might want to restart from a chk
        except AttributeError:
            input_string = GaussianCalculation._render_input_string_from_params(
                self.inputs.parameters.get_dict(), None
            )
        # Parse additional link1 sections
        if "extra_link1_sections" in self.inputs:
            for l1_name, l1_params in self.inputs.extra_link1_sections.items():
                input_string += "--Link1--\n"
                # The link1 secions don't support their own geometries.
                input_string += GaussianCalculation._render_input_string_from_params(
                    l1_params.get_dict(), None
                )
        with open(folder.get_abs_path(self.INPUT_FILE), "w") as out_file:
            out_file.write(input_string)
        settings = self.inputs.settings.get_dict() if "settings" in self.inputs else {}
        # create code info
        codeinfo = CodeInfo()
        codeinfo.cmdline_params = settings.pop("cmdline", [])
        codeinfo.code_uuid = self.inputs.code.uuid
        codeinfo.stdin_name = self.INPUT_FILE
        codeinfo.stdout_name = self.OUTPUT_FILE
        codeinfo.withmpi = self.inputs.metadata.options.withmpi
        # create calculation info
        calcinfo.uuid = self.uuid
        calcinfo.cmdline_params = codeinfo.cmdline_params
        calcinfo.stdin_name = self.INPUT_FILE
        calcinfo.stdout_name = self.OUTPUT_FILE
        calcinfo.codes_info = [codeinfo]
        calcinfo.retrieve_list = [self.OUTPUT_FILE]
        # symlink or copy to parent calculation
        calcinfo.remote_symlink_list = []
        calcinfo.remote_copy_list = []
        if "parent_calc_folder" in self.inputs:
            comp_uuid = self.inputs.parent_calc_folder.computer.uuid
            remote_path = self.inputs.parent_calc_folder.get_remote_path()
            copy_info = (comp_uuid, remote_path, "parent_calc")
            if (
                self.inputs.code.computer.uuid == comp_uuid
            ):  # if running on the same computer - make a symlink
                # if not - copy the folder
                calcinfo.remote_symlink_list.append(copy_info)
            else:
                calcinfo.remote_copy_list.append(copy_info)
        return calcinfo
    @classmethod
    def _render_input_string_from_params(cls, param_dict, structure):
        """Render the Gaussian input-file text for one job section.

        :param param_dict: plain dict of GaussianInput-style parameters.
        :param structure: StructureData or None (link1 sections and
            chk-restart inputs carry no geometry).
        """
        # the structure
        pmg_mol = structure.get_pymatgen_molecule() if structure else None
        # Determine charge and multiplicity; explicit parameters win over
        # the values derived from the molecule.
        charge = param_dict["charge"] if "charge" in param_dict else pmg_mol.charge
        multiplicity = (
            param_dict["multiplicity"]
            if "multiplicity" in param_dict
            else pmg_mol.multiplicity
        )
        inp = mgaus.GaussianInput(
            pmg_mol,
            charge=charge,
            spin_multiplicity=multiplicity,
            title="input generated by the aiida-gaussian plugin",
            functional=param_dict.get(
                "functional"
            ),  # dict.get returns None if key is not in dict
            basis_set=param_dict.get("basis_set"),
            route_parameters=param_dict.get("route_parameters"),
            input_parameters=param_dict.get("input_parameters"),
            link0_parameters=param_dict.get("link0_parameters"),
            dieze_tag="#P",
        )
        return inp.to_string(cart_coords=True)
| 32.488987 | 87 | 0.595661 |
2c50002de300f801c0db5ff35c4a155507d72594 | 116,638 | py | Python | sympy/core/numbers.py | qcgm1978/sympy | cc46047f4449b525b7b0edd4c634bf93d6e7c83d | [
"BSD-3-Clause"
] | 2 | 2021-01-09T23:11:25.000Z | 2021-01-11T15:04:22.000Z | sympy/core/numbers.py | qcgm1978/sympy | cc46047f4449b525b7b0edd4c634bf93d6e7c83d | [
"BSD-3-Clause"
] | 3 | 2021-02-28T03:58:40.000Z | 2021-03-07T06:12:47.000Z | sympy/core/numbers.py | qcgm1978/sympy | cc46047f4449b525b7b0edd4c634bf93d6e7c83d | [
"BSD-3-Clause"
] | 2 | 2021-01-08T23:03:23.000Z | 2021-01-13T18:57:02.000Z | import numbers
import decimal
import fractions
import math
import re as regex
from .containers import Tuple
from .sympify import (SympifyError, converter, sympify, _convert_numpy_types, _sympify,
_is_numpy_instance)
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .evalf import pure_complex
from .decorators import _sympifyit
from .cache import cacheit, clear_cache
from .logic import fuzzy_not
from sympy.core.compatibility import (as_int, HAS_GMPY, SYMPY_INTS,
int_info, gmpy)
from sympy.core.cache import lru_cache
from sympy.multipledispatch import dispatch
import mpmath
import mpmath.libmp as mlib
from mpmath.libmp import bitcount
from mpmath.libmp.backend import MPZ
from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from mpmath.ctx_mp import mpnumeric
from mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero, _normalize as mpf_normalize,
prec_to_dps)
from sympy.utilities.misc import debug, filldedent
from .parameters import global_parameters
from sympy.utilities.exceptions import SymPyDeprecationWarning
# mpmath rounding mode used by the normalization helpers below.
rnd = mlib.round_nearest
# Natural log of 2, cached at import time.
_LOG2 = math.log(2)
def comp(z1, z2, tol=None):
    """Return a bool indicating whether the error between z1 and z2
    is <= tol.
    Examples
    ========
    If ``tol`` is None then True will be returned if
    ``abs(z1 - z2)*10**p <= 5`` where ``p`` is minimum value of the
    decimal precision of each value.
    >>> from sympy.core.numbers import comp, pi
    >>> pi4 = pi.n(4); pi4
    3.142
    >>> comp(_, 3.142)
    True
    >>> comp(pi4, 3.141)
    False
    >>> comp(pi4, 3.143)
    False
    A comparison of strings will be made
    if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''.
    >>> comp(pi4, 3.1415)
    True
    >>> comp(pi4, 3.1415, '')
    False
    When ``tol`` is provided and ``z2`` is non-zero and
    ``|z1| > 1`` the error is normalized by ``|z1|``:
    >>> abs(pi4 - 3.14)/pi4
    0.000509791731426756
    >>> comp(pi4, 3.14, .001) # difference less than 0.1%
    True
    >>> comp(pi4, 3.14, .0005) # difference less than 0.1%
    False
    When ``|z1| <= 1`` the absolute error is used:
    >>> 1/pi4
    0.3183
    >>> abs(1/pi4 - 0.3183)/(1/pi4)
    3.07371499106316e-5
    >>> abs(1/pi4 - 0.3183)
    9.78393554684764e-6
    >>> comp(1/pi4, 0.3183, 1e-5)
    True
    To see if the absolute error between ``z1`` and ``z2`` is less
    than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)``
    or ``comp(z1 - z2, tol=tol)``:
    >>> abs(pi4 - 3.14)
    0.00160156249999988
    >>> comp(pi4 - 3.14, 0, .002)
    True
    >>> comp(pi4 - 3.14, 0, .001)
    False
    """
    if type(z2) is str:
        # string comparison mode: z1 must be a (possibly complex) number
        if not pure_complex(z1, or_real=True):
            raise ValueError('when z2 is a str z1 must be a Number')
        return str(z1) == z2
    if not z1:
        # make z1 the nonzero operand if exactly one of them is zero
        z1, z2 = z2, z1
    if not z1:
        # both operands are zero
        return True
    if not tol:
        a, b = z1, z2
        if tol == '':
            return str(a) == str(b)
        if tol is None:
            # default mode: compare to the coarser of the two precisions
            a, b = sympify(a), sympify(b)
            if not all(i.is_number for i in (a, b)):
                raise ValueError('expecting 2 numbers')
            fa = a.atoms(Float)
            fb = b.atoms(Float)
            if not fa and not fb:
                # no floats -- compare exactly
                return a == b
            # get a to be pure_complex
            for do in range(2):
                ca = pure_complex(a, or_real=True)
                if not ca:
                    if fa:
                        # evaluate a at its own minimal float precision
                        a = a.n(prec_to_dps(min([i._prec for i in fa])))
                        ca = pure_complex(a, or_real=True)
                    break
                else:
                    # a already decomposed; swap so b gets the same treatment
                    fa, fb = fb, fa
                    a, b = b, a
            cb = pure_complex(b)
            if not cb and fb:
                b = b.n(prec_to_dps(min([i._prec for i in fb])))
                cb = pure_complex(b, or_real=True)
            if ca and cb and (ca[1] or cb[1]):
                # genuinely complex: compare real and imaginary parts pairwise
                return all(comp(i, j) for i, j in zip(ca, cb))
            tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec)))
            return int(abs(a - b)*tol) <= 5
    diff = abs(z1 - z2)
    az1 = abs(z1)
    if z2 and az1 > 1:
        # relative error when both values are "large"
        return diff/az1 <= tol
    else:
        return diff <= tol
def mpf_norm(mpf, prec):
    """Normalize the mpf tuple ``mpf`` for ``prec`` bits of precision.

    Unlike a bare call to ``mpf_normalize``, a zero mantissa is only
    treated as the number 0 when the bit count is also 0: the special
    values "+inf", "-inf" and "nan" carry a zero mantissa too and are
    passed through unchanged.

    Note: this is only a wrapper around ``mpf_normalize`` and does not
    validate the tuple, so mpf tuples not produced by mpmath may give
    bad results.
    """
    sign, man, expt, bc = mpf
    if not man:
        # mpf_normalize assumes a zero mantissa means zero (see issue
        # 6639); only honor that when bc is zero as well, otherwise the
        # tuple is one of the non-finite specials and is returned as-is.
        return fzero if not bc else mpf
    # Necessary if mpmath is using the gmpy backend
    from mpmath.libmp.backend import MPZ
    return mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd)
# TODO: we should use the warnings module
# Module-level error-handling flags, toggled via seterr() below.
_errdict = {"divide": False}
def seterr(divide=False):
    """
    Should sympy raise an exception on 0/0 or return a nan?
    divide == True .... raise an exception
    divide == False ... return nan
    """
    if _errdict["divide"] == divide:
        # nothing to change, so avoid invalidating the cache
        return
    clear_cache()
    _errdict["divide"] = divide
def _as_integer_ratio(p):
    """Return ints (numerator, denominator) exactly representing ``p``.

    ``p`` is a float or any object exposing an mpf ``_mpf_`` tuple.
    """
    neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
    num = -man if neg_pow % 2 else man
    if expt < 0:
        # fractional part: denominator is the matching power of two
        return int(num), int(2**-expt)
    return int(num*2**expt), 1
def _decimal_to_Rational_prec(dec):
    """Convert an ordinary decimal instance to a Rational.

    Returns (value, prec) where prec is the number of significant
    digits of ``dec``.
    """
    if not dec.is_finite():
        raise TypeError("dec must be finite, got %s." % dec)
    sign, digits, expo = dec.as_tuple()
    prec = len(digits)
    if expo >= 0:
        # no fractional digits: the value is an integer
        return Integer(int(dec)), prec
    mantissa = sum(d*10**i for i, d in enumerate(reversed(digits)))
    return Rational((-1)**sign*mantissa, 10**-expo), prec
# Optionally signed decimal prefix: "1.", ".5", "3.14" or plain digits.
_floatpat = regex.compile(r"[-+]?((\d*\.\d+)|(\d+\.?))")
def _literal_float(f):
    """Return True if the string ``f`` starts like a floating point number."""
    return _floatpat.match(f) is not None
# (a,b) -> gcd(a,b)
# TODO caching with decorator, but not to degrade performance
@lru_cache(1024)
def igcd(*args):
    """Compute the nonnegative greatest common divisor of the integer args.

    Based on the well known Euclid's algorithm; results are memoized via
    ``lru_cache`` to improve speed.

    Examples
    ========
    >>> from sympy.core.numbers import igcd
    >>> igcd(2, 4)
    2
    >>> igcd(5, 10, 15)
    5
    """
    if len(args) < 2:
        raise TypeError(
            'igcd() takes at least 2 arguments (%s given)' % len(args))
    values = [abs(as_int(v)) for v in args]
    if 1 in values:
        return 1
    result = values.pop()
    if HAS_GMPY:  # Using gmpy if present to speed up.
        for v in values:
            if v:
                result = gmpy.gcd(result, v)
        return as_int(result)
    for v in values:
        if v:
            result = igcd2(result, v)
    return result
def _igcd2_python(a, b):
    """Compute gcd of two Python integers a and b.

    Delegates to Lehmer's algorithm when both operands are very large;
    otherwise runs the plain Euclidean loop.
    """
    if a.bit_length() > BIGBITS and b.bit_length() > BIGBITS:
        return igcd_lehmer(a, b)
    x, y = abs(a), abs(b)
    while y:
        x, y = y, x % y
    return x
try:
    # Prefer the C-implemented math.gcd when available.
    from math import gcd as igcd2
except ImportError:
    # Fall back to the pure Python implementation defined above.
    igcd2 = _igcd2_python
# Use Lehmer's algorithm only for very large numbers.
BIGBITS = 5000
def igcd_lehmer(a, b):
    """Computes greatest common divisor of two integers.
    Explanation
    ===========
    Euclid's algorithm for the computation of the greatest
    common divisor gcd(a, b) of two (positive) integers
    a and b is based on the division identity
        a = q*b + r,
    where the quotient q and the remainder r are integers
    and 0 <= r < b. Then each common divisor of a and b
    divides r, and it follows that gcd(a, b) == gcd(b, r).
    The algorithm works by constructing the sequence
    r0, r1, r2, ..., where r0 = a, r1 = b, and each rn
    is the remainder from the division of the two preceding
    elements.
    In Python, q = a // b and r = a % b are obtained by the
    floor division and the remainder operations, respectively.
    These are the most expensive arithmetic operations, especially
    for large a and b.
    Lehmer's algorithm is based on the observation that the quotients
    qn = r(n-1) // rn are in general small integers even
    when a and b are very large. Hence the quotients can be
    usually determined from a relatively small number of most
    significant bits.
    The efficiency of the algorithm is further enhanced by not
    computing each long remainder in Euclid's sequence. The remainders
    are linear combinations of a and b with integer coefficients
    derived from the quotients. The coefficients can be computed
    as far as the quotients can be determined from the chosen
    most significant parts of a and b. Only then a new pair of
    consecutive remainders is computed and the algorithm starts
    anew with this pair.
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm
    """
    a, b = abs(as_int(a)), abs(as_int(b))
    if a < b:
        a, b = b, a
    # The algorithm works by using one or two digit division
    # whenever possible. The outer loop will replace the
    # pair (a, b) with a pair of shorter consecutive elements
    # of the Euclidean gcd sequence until a and b
    # fit into two Python (long) int digits.
    nbits = 2*int_info.bits_per_digit
    while a.bit_length() > nbits and b != 0:
        # Quotients are mostly small integers that can
        # be determined from most significant bits.
        n = a.bit_length() - nbits
        x, y = int(a >> n), int(b >> n) # most significant bits
        # Elements of the Euclidean gcd sequence are linear
        # combinations of a and b with integer coefficients.
        # Compute the coefficients of consecutive pairs
        #     a' = A*a + B*b, b' = C*a + D*b
        # using small integer arithmetic as far as possible.
        A, B, C, D = 1, 0, 0, 1 # initial values
        while True:
            # The coefficients alternate in sign while looping.
            # The inner loop combines two steps to keep track
            # of the signs.
            # At this point we have
            #     A > 0, B <= 0, C <= 0, D > 0,
            #     x' = x + B <= x < x" = x + A,
            #     y' = y + C <= y < y" = y + D,
            # and
            #     x'*N <= a' < x"*N, y'*N <= b' < y"*N,
            # where N = 2**n.
            # Now, if y' > 0, and x"//y' and x'//y" agree,
            # then their common value is equal to q = a'//b'.
            # In addition,
            #     x'%y" = x' - q*y" < x" - q*y' = x"%y',
            # and
            #     (x'%y")*N < a'%b' < (x"%y')*N.
            # On the other hand, we also have x//y == q,
            # and therefore
            #     x'%y" = x + B - q*(y + D) = x%y + B',
            #     x"%y' = x + A - q*(y + C) = x%y + A',
            # where
            #     B' = B - q*D < 0, A' = A - q*C > 0.
            if y + C <= 0:
                # quotient no longer determinable from the top bits
                break
            q = (x + A) // (y + C)
            # Now x'//y" <= q, and equality holds if
            #     x' - q*y" = (x - q*y) + (B - q*D) >= 0.
            # This is a minor optimization to avoid division.
            x_qy, B_qD = x - q*y, B - q*D
            if x_qy + B_qD < 0:
                break
            # Next step in the Euclidean sequence.
            x, y = y, x_qy
            A, B, C, D = C, D, A - q*C, B_qD
            # At this point the signs of the coefficients
            # change and their roles are interchanged.
            #     A <= 0, B > 0, C > 0, D < 0,
            #     x' = x + A <= x < x" = x + B,
            #     y' = y + D < y < y" = y + C.
            if y + D <= 0:
                break
            q = (x + B) // (y + D)
            x_qy, A_qC = x - q*y, A - q*C
            if x_qy + A_qC < 0:
                break
            x, y = y, x_qy
            A, B, C, D = C, D, A_qC, B - q*D
            # Now the conditions on top of the loop
            # are again satisfied.
            #     A > 0, B < 0, C < 0, D > 0.
        if B == 0:
            # This can only happen when y == 0 in the beginning
            # and the inner loop does nothing.
            # Long division is forced.
            a, b = b, a % b
            continue
        # Compute new long arguments using the coefficients.
        a, b = A*a + B*b, C*a + D*b
    # Small divisors. Finish with the standard algorithm.
    while b:
        a, b = b, a % b
    return a
def ilcm(*args):
    """Compute the integer least common multiple of the arguments.

    Examples
    ========
    >>> from sympy.core.numbers import ilcm
    >>> ilcm(5, 10)
    10
    >>> ilcm(7, 3)
    21
    >>> ilcm(5, 10, 15)
    30
    """
    if len(args) < 2:
        raise TypeError(
            'ilcm() takes at least 2 arguments (%s given)' % len(args))
    if 0 in args:
        return 0
    result = args[0]
    for value in args[1:]:
        # divide first by the gcd so intermediates stay small;
        # gcd(result, value) | result, so the division is exact
        result = result // igcd(result, value) * value
    return result
def igcdex(a, b):
    """Return a tuple (x, y, g) such that g = x*a + y*b = gcd(a, b).

    Examples
    ========
    >>> from sympy.core.numbers import igcdex
    >>> igcdex(2, 3)
    (-1, 1, 1)
    >>> igcdex(10, 12)
    (-1, 1, 2)
    >>> x, y, g = igcdex(100, 2004)
    >>> x, y, g
    (-20, 1, 4)
    >>> x*100 + y*2004
    4
    """
    # Degenerate cases: one or both operands are zero.
    if (not a) and (not b):
        return (0, 1, 0)
    if not a:
        return (0, b//abs(b), abs(b))
    if not b:
        return (a//abs(a), 0, abs(a))
    # Work with magnitudes; restore the signs on the way out.
    x_sign = -1 if a < 0 else 1
    y_sign = -1 if b < 0 else 1
    a, b = abs(a), abs(b)
    # Extended Euclidean algorithm.
    x, y, r, s = 1, 0, 0, 1
    while b:
        q, c = divmod(a, b)
        a, b, r, s, x, y = b, c, x - q*r, y - q*s, r, s
    return (x*x_sign, y*y_sign, a)
def mod_inverse(a, m):
    """
    Return the number c such that, (a * c) = 1 (mod m)
    where c has the same sign as m. If no such value exists,
    a ValueError is raised.

    Examples
    ========
    >>> from sympy import S
    >>> from sympy.core.numbers import mod_inverse
    >>> mod_inverse(3, 11)
    4
    >>> mod_inverse(-3, 11)
    7

    When there is a common factor between the numerators of
    ``a`` and ``m`` the inverse does not exist and a ValueError
    is raised (e.g. ``mod_inverse(2, 4)``). Symbolic rationals
    are handled too:

    >>> mod_inverse(S(2)/7, S(5)/2)
    7/2

    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Modular_multiplicative_inverse
    .. [2] https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
    """
    c = None
    try:
        # Integer path: extended Euclidean algorithm.
        a, m = as_int(a), as_int(m)
        if m != 1 and m != -1:
            inv, _, g = igcdex(a, m)
            if g == 1:
                # Python's % already yields a result with the sign of m.
                c = inv % m
    except ValueError:
        # Non-integer path: fall back to sympified numbers.
        a, m = sympify(a), sympify(m)
        if not (a.is_number and m.is_number):
            raise TypeError(filldedent('''
                Expected numbers for arguments; symbolic `mod_inverse`
                is not implemented
                but symbolic expressions can be handled with the
                similar function,
                sympy.polys.polytools.invert'''))
        big = (m > 1)
        if not (big is S.true or big is S.false):
            raise ValueError('m > 1 did not evaluate; try to simplify %s' % m)
        if big:
            c = 1/a
    if c is None:
        raise ValueError('inverse of %s (mod %s) does not exist' % (a, m))
    return c
class Number(AtomicExpr):
"""Represents atomic numbers in SymPy.
Explanation
===========
Floating point numbers are represented by the Float class.
Rational numbers (of any size) are represented by the Rational class.
Integer numbers (of any size) are represented by the Integer class.
Float and Rational are subclasses of Number; Integer is a subclass
of Rational.
For example, ``2/3`` is represented as ``Rational(2, 3)`` which is
a different object from the floating point number obtained with
Python division ``2/3``. Even for numbers that are exactly
represented in binary, there is a difference between how two forms,
such as ``Rational(1, 2)`` and ``Float(0.5)``, are used in SymPy.
The rational form is to be preferred in symbolic computations.
Other kinds of numbers, such as algebraic numbers ``sqrt(2)`` or
complex numbers ``3 + 4*I``, are not instances of Number class as
they are not atomic.
See Also
========
Float, Integer, Rational
"""
is_commutative = True
is_number = True
is_Number = True
__slots__ = ()
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, str):
_obj = obj.lower() # float('INF') == float('inf')
if _obj == 'nan':
return S.NaN
elif _obj == 'inf':
return S.Infinity
elif _obj == '+inf':
return S.Infinity
elif _obj == '-inf':
return S.NegativeInfinity
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def invert(self, other, *gens, **args):
from sympy.polys.polytools import invert
if getattr(other, 'is_number', True):
return mod_inverse(self, other)
return invert(self, other, *gens, **args)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
if self.is_infinite or S.NaN in (self, other):
return (S.NaN, S.NaN)
except TypeError:
return NotImplemented
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
elif isinstance(other, Float):
rat = self/Rational(other)
else:
rat = self/other
if other.is_finite:
w = int(rat) if rat >= 0 else int(rat) - 1
r = self - other*w
else:
w = 0 if not self or (sign(self) == sign(other)) else -1
r = other if w else self
return Tuple(w, r)
def __rdivmod__(self, other):
    """Reflected divmod: compute ``divmod(other, self)``.

    ``other`` is first coerced to a Number; if that fails the
    operation is deferred to the other operand.
    """
    try:
        converted = Number(other)
    except TypeError:
        return NotImplemented
    return divmod(converted, self)
def _as_mpf_val(self, prec):
    """Evaluation of mpf tuple accurate to at least prec bits."""
    # Abstract: each concrete Number subclass supplies its own conversion.
    raise NotImplementedError('%s needs ._as_mpf_val() method' %
        (self.__class__.__name__))
def _eval_evalf(self, prec):
    """Return self as a Float evaluated to ``prec`` bits."""
    return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
    """Convert to a Python float via a 53-bit (double precision) mpf."""
    return mlib.to_float(self._as_mpf_val(53))
def floor(self):
    """Abstract: largest Integer <= self; implemented by subclasses."""
    raise NotImplementedError('%s needs .floor() method' %
        (self.__class__.__name__))
def ceiling(self):
    """Abstract: smallest Integer >= self; implemented by subclasses."""
    raise NotImplementedError('%s needs .ceiling() method' %
        (self.__class__.__name__))
def __floor__(self):
    """Support math.floor() by delegating to .floor()."""
    return self.floor()
def __ceil__(self):
    """Support math.ceil() by delegating to .ceiling()."""
    return self.ceiling()
def _eval_conjugate(self):
    # Numbers here are real, so the complex conjugate is the number itself.
    return self
def _eval_order(self, *symbols):
    """A nonzero number is O(1) with respect to any symbols."""
    from sympy import Order
    # Order(5, x, y) -> Order(1,x,y)
    return Order(S.One, *symbols)
def _eval_subs(self, old, new):
    """Handle substitution of ``-self`` -> ``-new``; a number matches nothing else."""
    if old == -self:
        return -new
    return self  # there is no other possibility
def _eval_is_finite(self):
    # Generic Numbers are finite; infinities/NaN override elsewhere.
    return True
@classmethod
def class_key(cls):
    """Key placing Number before other classes in the default sort order."""
    return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
    """Return the canonical (cached) sort key; the number itself is the coefficient."""
    return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
    """Add, with fast paths for NaN and the signed infinities."""
    if isinstance(other, Number) and global_parameters.evaluate:
        if other is S.NaN:
            return S.NaN
        elif other is S.Infinity:
            return S.Infinity
        elif other is S.NegativeInfinity:
            return S.NegativeInfinity
    # everything else (including unevaluated mode) uses the generic path
    return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
    """Subtract, with fast paths for NaN and the signed infinities."""
    if isinstance(other, Number) and global_parameters.evaluate:
        if other is S.NaN:
            return S.NaN
        elif other is S.Infinity:
            # finite - oo -> -oo
            return S.NegativeInfinity
        elif other is S.NegativeInfinity:
            # finite - (-oo) -> oo
            return S.Infinity
    return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
    """Multiply, resolving number*oo by the sign of self (0*oo -> nan)."""
    if isinstance(other, Number) and global_parameters.evaluate:
        if other is S.NaN:
            return S.NaN
        elif other is S.Infinity:
            if self.is_zero:
                return S.NaN
            elif self.is_positive:
                return S.Infinity
            else:
                return S.NegativeInfinity
        elif other is S.NegativeInfinity:
            if self.is_zero:
                return S.NaN
            elif self.is_positive:
                return S.NegativeInfinity
            else:
                return S.Infinity
        elif isinstance(other, Tuple):
            # let Tuple's reflected __rmul__ handle number*Tuple
            return NotImplemented
    return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
    """Divide, with fast paths: x/nan -> nan, finite/oo -> 0."""
    if isinstance(other, Number) and global_parameters.evaluate:
        if other is S.NaN:
            return S.NaN
        elif other is S.Infinity or other is S.NegativeInfinity:
            return S.Zero
    return AtomicExpr.__truediv__(self, other)
def __eq__(self, other):
    # Abstract: every concrete Number subclass defines its own equality.
    raise NotImplementedError('%s needs .__eq__() method' %
        (self.__class__.__name__))
def __ne__(self, other):
    # Abstract: every concrete Number subclass defines its own inequality.
    raise NotImplementedError('%s needs .__ne__() method' %
        (self.__class__.__name__))
def __lt__(self, other):
    # Sympify first so an unsympifiable operand gives a clear TypeError;
    # the actual ordering is implemented by subclasses.
    try:
        other = _sympify(other)
    except SympifyError:
        raise TypeError("Invalid comparison %s < %s" % (self, other))
    raise NotImplementedError('%s needs .__lt__() method' %
        (self.__class__.__name__))
def __le__(self, other):
    # Sympify first so an unsympifiable operand gives a clear TypeError;
    # the actual ordering is implemented by subclasses.
    try:
        other = _sympify(other)
    except SympifyError:
        raise TypeError("Invalid comparison %s <= %s" % (self, other))
    raise NotImplementedError('%s needs .__le__() method' %
        (self.__class__.__name__))
def __gt__(self, other):
    """Delegate ``self > other`` to the reflected ``other < self``.

    Raises TypeError for operands that cannot be sympified, matching
    the other rich comparisons on Number.
    """
    try:
        other = _sympify(other)
    except SympifyError:
        raise TypeError("Invalid comparison %s > %s" % (self, other))
    # ``other`` is already sympified; no need to convert it a second time
    return other.__lt__(self)
def __ge__(self, other):
    """Delegate ``self >= other`` to the reflected ``other <= self``.

    Raises TypeError for operands that cannot be sympified, matching
    the other rich comparisons on Number.
    """
    try:
        other = _sympify(other)
    except SympifyError:
        raise TypeError("Invalid comparison %s >= %s" % (self, other))
    # ``other`` is already sympified; no need to convert it a second time
    return other.__le__(self)
def __hash__(self):
    # Defining __eq__ above would otherwise clear the inherited hash.
    return super().__hash__()
def is_constant(self, *wrt, **flags):
    """A number is constant with respect to any symbols."""
    return True
def as_coeff_mul(self, *deps, rational=True, **kwargs):
    # a -> c*t  (coefficient, factor-tuple form)
    if self.is_Rational or not rational:
        # the whole number is the coefficient
        return self, tuple()
    elif self.is_negative:
        # pull out -1 so the remaining factor is positive
        return S.NegativeOne, (-self,)
    return S.One, (self,)
def as_coeff_add(self, *deps):
    # a -> c + t  (coefficient, term-tuple form)
    if self.is_Rational:
        return self, tuple()
    return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
    """Efficiently extract the coefficient of a product. """
    if rational and not self.is_Rational:
        return S.One, self
    # zero is returned as (One, 0) so the coefficient is never 0
    return (self, S.One) if self else (S.One, self)
def as_coeff_Add(self, rational=False):
    """Efficiently extract the coefficient of a summation. """
    if not rational:
        return self, S.Zero
    return S.Zero, self
def gcd(self, other):
    """Compute GCD of `self` and `other`, delegating to sympy.polys. """
    from sympy.polys import gcd
    return gcd(self, other)
def lcm(self, other):
    """Compute LCM of `self` and `other`, delegating to sympy.polys. """
    from sympy.polys import lcm
    return lcm(self, other)
def cofactors(self, other):
    """Compute GCD and cofactors of `self` and `other`, via sympy.polys. """
    from sympy.polys import cofactors
    return cofactors(self, other)
class Float(Number):
    """Represent a floating-point number of arbitrary precision.

    Examples
    ========

    >>> from sympy import Float
    >>> Float(3.5)
    3.50000000000000
    >>> Float(3)
    3.00000000000000

    Creating Floats from strings (and Python ``int`` and ``long``
    types) will give a minimum precision of 15 digits, but the
    precision will automatically increase to capture all digits
    entered.

    >>> Float(1)
    1.00000000000000
    >>> Float(10**20)
    100000000000000000000.
    >>> Float('1e20')
    100000000000000000000.

    However, *floating-point* numbers (Python ``float`` types) retain
    only 15 digits of precision:

    >>> Float(1e20)
    1.00000000000000e+20
    >>> Float(1.23456789123456789)
    1.23456789123457

    It may be preferable to enter high-precision decimal numbers
    as strings:

    >>> Float('1.23456789123456789')
    1.23456789123456789

    The desired number of digits can also be specified:

    >>> Float('1e-3', 3)
    0.00100
    >>> Float(100, 4)
    100.0

    Float can automatically count significant figures if a null string
    is sent for the precision; spaces or underscores are also allowed. (Auto-
    counting is only allowed for strings, ints and longs).

    >>> Float('123 456 789.123_456', '')
    123456789.123456
    >>> Float('12e-3', '')
    0.012
    >>> Float(3, '')
    3.

    If a number is written in scientific notation, only the digits before the
    exponent are considered significant if a decimal appears, otherwise the
    "e" signifies only how to move the decimal:

    >>> Float('60.e2', '') # 2 digits significant
    6.0e+3
    >>> Float('60e2', '') # 4 digits significant
    6000.
    >>> Float('600e-2', '') # 3 digits significant
    6.00

    Notes
    =====

    Floats are inexact by their nature unless their value is a binary-exact
    value.

    >>> approx, exact = Float(.1, 1), Float(.125, 1)

    For calculation purposes, evalf needs to be able to change the precision
    but this will not increase the accuracy of the inexact value. The
    following is the most accurate 5-digit approximation of a value of 0.1
    that had only 1 digit of precision:

    >>> approx.evalf(5)
    0.099609

    By contrast, 0.125 is exact in binary (as it is in base 10) and so it
    can be passed to Float or evalf to obtain an arbitrary precision with
    matching accuracy:

    >>> Float(exact, 5)
    0.12500
    >>> exact.evalf(20)
    0.12500000000000000000

    Trying to make a high-precision Float from a float is not disallowed,
    but one must keep in mind that the *underlying float* (not the apparent
    decimal value) is being obtained with high precision. For example, 0.3
    does not have a finite binary representation. The closest rational is
    the fraction 5404319552844595/2**54. So if you try to obtain a Float of
    0.3 to 20 digits of precision you will not see the same thing as 0.3
    followed by 19 zeros:

    >>> Float(0.3, 20)
    0.29999999999999998890

    If you want a 20-digit value of the decimal 0.3 (not the floating point
    approximation of 0.3) you should send the 0.3 as a string. The underlying
    representation is still binary but a higher precision than Python's float
    is used:

    >>> Float('0.3', 20)
    0.30000000000000000000

    Although you can increase the precision of an existing Float using Float
    it will not increase the accuracy -- the underlying value is not changed:

    >>> def show(f): # binary rep of Float
    ...     from sympy import Mul, Pow
    ...     s, m, e, b = f._mpf_
    ...     v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
    ...     print('%s at prec=%s' % (v, f._prec))
    ...
    >>> t = Float('0.3', 3)
    >>> show(t)
    4915/2**14 at prec=13
    >>> show(Float(t, 20)) # higher prec, not higher accuracy
    4915/2**14 at prec=70
    >>> show(Float(t, 2)) # lower prec
    307/2**10 at prec=10

    The same thing happens when evalf is used on a Float:

    >>> show(t.evalf(20))
    4915/2**14 at prec=70
    >>> show(t.evalf(2))
    307/2**10 at prec=10

    Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
    produce the number (-1)**n*c*2**p:

    >>> n, c, p = 1, 5, 0
    >>> (-1)**n*c*2**p
    -5
    >>> Float((1, 5, 0))
    -5.00000000000000

    An actual mpf tuple also contains the number of bits in c as the last
    element of the tuple:

    >>> _._mpf_
    (1, 5, 0, 3)

    This is not needed for instantiation and is not the same thing as the
    precision. The mpf tuple and the precision are two separate quantities
    that Float tracks.

    In SymPy, a Float is a number that can be computed with arbitrary
    precision. Although floating point 'inf' and 'nan' are not such
    numbers, Float can create these numbers:

    >>> Float('-inf')
    -oo
    >>> _.is_Float
    False
    """
    # _mpf_ is the mpmath-style tuple (sign, mantissa, exponent, bitcount);
    # _prec is the binary working precision.
    __slots__ = ('_mpf_', '_prec')

    # A Float represents many real numbers,
    # both rational and irrational.
    is_rational = None
    is_irrational = None
    is_number = True

    is_real = True
    is_extended_real = True

    is_Float = True

    def __new__(cls, num, dps=None, prec=None, precision=None):
        """Build a Float from num; dps is decimal, precision binary precision.

        ``prec`` is a deprecated alias for ``dps``.  A null string ('')
        for the precision triggers significant-figure auto-counting.
        """
        if prec is not None:
            SymPyDeprecationWarning(
                feature="Using 'prec=XX' to denote decimal precision",
                useinstead="'dps=XX' for decimal precision and 'precision=XX' "\
                    "for binary precision",
                issue=12820,
                deprecated_since_version="1.1").warn()
            dps = prec
            del prec  # avoid using this deprecated kwarg
        if dps is not None and precision is not None:
            raise ValueError('Both decimal and binary precision supplied. '
                'Supply only one. ')
        # --- stage 1: normalize ``num`` or return a singleton ----------
        if isinstance(num, str):
            # Float accepts spaces as digit separators
            num = num.replace(' ', '').lower()
            # Underscores are legal digit separators (as in Py 3.6
            # float literals); validate their placement and drop them.
            if '_' in num:
                parts = num.split('_')
                if not (all(parts) and
                        all(parts[i][-1].isdigit()
                            for i in range(0, len(parts), 2)) and
                        all(parts[i][0].isdigit()
                            for i in range(1, len(parts), 2))):
                    # copy Py 3.6 error
                    raise ValueError("could not convert string to float: '%s'" % num)
                num = ''.join(parts)
            if num.startswith('.') and len(num) > 1:
                num = '0' + num
            elif num.startswith('-.') and len(num) > 2:
                num = '-0.' + num[2:]
            elif num in ('inf', '+inf'):
                return S.Infinity
            elif num == '-inf':
                return S.NegativeInfinity
        elif isinstance(num, float) and num == 0:
            num = '0'
        elif isinstance(num, float) and num == float('inf'):
            return S.Infinity
        elif isinstance(num, float) and num == float('-inf'):
            return S.NegativeInfinity
        elif isinstance(num, float) and num == float('nan'):
            return S.NaN
        elif isinstance(num, (SYMPY_INTS, Integer)):
            # route ints through the string path so all digits count
            num = str(num)
        elif num is S.Infinity:
            return num
        elif num is S.NegativeInfinity:
            return num
        elif num is S.NaN:
            return num
        elif _is_numpy_instance(num):  # support for numpy datatypes
            num = _convert_numpy_types(num)
        elif isinstance(num, mpmath.mpf):
            if precision is None:
                if dps is None:
                    precision = num.context.prec
            num = num._mpf_
        # --- stage 2: determine the precision --------------------------
        if dps is None and precision is None:
            dps = 15
            if isinstance(num, Float):
                return num
            if isinstance(num, str) and _literal_float(num):
                try:
                    Num = decimal.Decimal(num)
                except decimal.InvalidOperation:
                    pass
                else:
                    isint = '.' not in num
                    num, dps = _decimal_to_Rational_prec(Num)
                    if num.is_Integer and isint:
                        dps = max(dps, len(str(num).lstrip('-')))
                    dps = max(15, dps)
                    precision = mlib.libmpf.dps_to_prec(dps)
        elif precision == '' and dps is None or precision is None and dps == '':
            # null-string precision: auto-count significant figures
            if not isinstance(num, str):
                raise ValueError('The null string can only be used when '
                    'the number to Float is passed as a string or an integer.')
            ok = None
            if _literal_float(num):
                try:
                    Num = decimal.Decimal(num)
                except decimal.InvalidOperation:
                    pass
                else:
                    isint = '.' not in num
                    num, dps = _decimal_to_Rational_prec(Num)
                    if num.is_Integer and isint:
                        dps = max(dps, len(str(num).lstrip('-')))
                    precision = mlib.libmpf.dps_to_prec(dps)
                    ok = True
            if ok is None:
                raise ValueError('string-float not recognized: %s' % num)

        # decimal precision(dps) is set and maybe binary precision(precision)
        # as well.From here on binary precision is used to compute the Float.
        # Hence, if supplied use binary precision else translate from decimal
        # precision.

        if precision is None or precision == '':
            precision = mlib.libmpf.dps_to_prec(dps)

        precision = int(precision)

        # --- stage 3: convert ``num`` to an mpf tuple -------------------
        if isinstance(num, float):
            _mpf_ = mlib.from_float(num, precision, rnd)
        elif isinstance(num, str):
            _mpf_ = mlib.from_str(num, precision, rnd)
        elif isinstance(num, decimal.Decimal):
            if num.is_finite():
                _mpf_ = mlib.from_str(str(num), precision, rnd)
            elif num.is_nan():
                return S.NaN
            elif num.is_infinite():
                if num > 0:
                    return S.Infinity
                return S.NegativeInfinity
            else:
                raise ValueError("unexpected decimal value %s" % str(num))
        elif isinstance(num, tuple) and len(num) in (3, 4):
            if type(num[1]) is str:
                # it's a hexadecimal (coming from a pickled object)
                # assume that it is in standard form
                num = list(num)
                # If we're loading an object pickled in Python 2 into
                # Python 3, we may need to strip a tailing 'L' because
                # of a shim for int on Python 3, see issue #13470.
                if num[1].endswith('L'):
                    num[1] = num[1][:-1]
                num[1] = MPZ(num[1], 16)
                _mpf_ = tuple(num)
            else:
                if len(num) == 4:
                    # handle normalization hack
                    return Float._new(num, precision)
                else:
                    # NOTE(review): ``(int, int)`` is a Py2 ``(int, long)``
                    # leftover; the duplicate is harmless but redundant.
                    if not all((
                            num[0] in (0, 1),
                            num[1] >= 0,
                            all(type(i) in (int, int) for i in num)
                            )):
                        raise ValueError('malformed mpf: %s' % (num,))
                    # don't compute number or else it may
                    # over/underflow
                    return Float._new(
                        (num[0], num[1], num[2], bitcount(num[1])),
                        precision)
        else:
            try:
                _mpf_ = num._as_mpf_val(precision)
            except (NotImplementedError, AttributeError):
                _mpf_ = mpmath.mpf(num, prec=precision)._mpf_

        return cls._new(_mpf_, precision, zero=False)

    @classmethod
    def _new(cls, _mpf_, _prec, zero=True):
        """Construct directly from an mpf tuple, mapping special values to singletons."""
        # special cases
        if zero and _mpf_ == fzero:
            return S.Zero  # Float(0) -> 0.0; Float._new((0,0,0,0)) -> 0.0
        elif _mpf_ == _mpf_nan:
            return S.NaN
        elif _mpf_ == _mpf_inf:
            return S.Infinity
        elif _mpf_ == _mpf_ninf:
            return S.NegativeInfinity

        obj = Expr.__new__(cls)
        obj._mpf_ = mpf_norm(_mpf_, _prec)
        obj._prec = _prec
        return obj

    # mpz can't be pickled
    def __getnewargs__(self):
        # Pickle support: re-create via __new__ with a picklable mpf tuple.
        return (mlib.to_pickable(self._mpf_),)

    def __getstate__(self):
        # Only _prec is carried as state; _mpf_ travels via __getnewargs__.
        return {'_prec': self._prec}

    def _hashable_content(self):
        # Both the value and the precision distinguish Floats.
        return (self._mpf_, self._prec)

    def floor(self):
        """Return the largest Integer <= self."""
        return Integer(int(mlib.to_int(
            mlib.mpf_floor(self._mpf_, self._prec))))

    def ceiling(self):
        """Return the smallest Integer >= self."""
        return Integer(int(mlib.to_int(
            mlib.mpf_ceil(self._mpf_, self._prec))))

    def __floor__(self):
        return self.floor()

    def __ceil__(self):
        return self.ceiling()

    @property
    def num(self):
        """Return self as an mpmath mpf."""
        return mpmath.mpf(self._mpf_)

    def _as_mpf_val(self, prec):
        # Renormalize to the requested precision; log if that changes the value.
        rv = mpf_norm(self._mpf_, prec)
        if rv != self._mpf_ and self._prec == prec:
            debug(self._mpf_, rv)
        return rv

    def _as_mpf_op(self, prec):
        return self._mpf_, max(prec, self._prec)

    def _eval_is_finite(self):
        if self._mpf_ in (_mpf_inf, _mpf_ninf):
            return False
        return True

    def _eval_is_infinite(self):
        if self._mpf_ in (_mpf_inf, _mpf_ninf):
            return True
        return False

    def _eval_is_integer(self):
        # Only Float(0) survives as a Float with an integer value;
        # other integral values cannot be asserted from the binary rep.
        return self._mpf_ == fzero

    def _eval_is_negative(self):
        if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf:
            return False
        return self.num < 0

    def _eval_is_positive(self):
        if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf:
            return False
        return self.num > 0

    def _eval_is_extended_negative(self):
        if self._mpf_ == _mpf_ninf:
            return True
        if self._mpf_ == _mpf_inf:
            return False
        return self.num < 0

    def _eval_is_extended_positive(self):
        if self._mpf_ == _mpf_inf:
            return True
        if self._mpf_ == _mpf_ninf:
            return False
        return self.num > 0

    def _eval_is_zero(self):
        return self._mpf_ == fzero

    def __bool__(self):
        return self._mpf_ != fzero

    def __neg__(self):
        return Float._new(mlib.mpf_neg(self._mpf_), self._prec)

    @_sympifyit('other', NotImplemented)
    def __add__(self, other):
        # Number+Float is computed at the max of the two precisions.
        if isinstance(other, Number) and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
        return Number.__add__(self, other)

    @_sympifyit('other', NotImplemented)
    def __sub__(self, other):
        if isinstance(other, Number) and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
        return Number.__sub__(self, other)

    @_sympifyit('other', NotImplemented)
    def __mul__(self, other):
        if isinstance(other, Number) and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
        return Number.__mul__(self, other)

    @_sympifyit('other', NotImplemented)
    def __truediv__(self, other):
        # division by an exact zero falls through to Number.__truediv__
        if isinstance(other, Number) and other != 0 and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
        return Number.__truediv__(self, other)

    @_sympifyit('other', NotImplemented)
    def __mod__(self, other):
        if isinstance(other, Rational) and other.q != 1 and global_parameters.evaluate:
            # calculate mod with Rationals, *then* round the result
            return Float(Rational.__mod__(Rational(self), other),
                precision=self._prec)
        if isinstance(other, Float) and global_parameters.evaluate:
            # exact multiples give an exact zero at the joint precision
            r = self/other
            if r == int(r):
                return Float(0, precision=max(self._prec, other._prec))
        if isinstance(other, Number) and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
        return Number.__mod__(self, other)

    @_sympifyit('other', NotImplemented)
    def __rmod__(self, other):
        if isinstance(other, Float) and global_parameters.evaluate:
            return other.__mod__(self)
        if isinstance(other, Number) and global_parameters.evaluate:
            rhs, prec = other._as_mpf_op(self._prec)
            return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
        return Number.__rmod__(self, other)

    def _eval_power(self, expt):
        """
        expt is symbolic object but not equal to 0, 1

        (-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
                  -> p**r*(sin(Pi*r) + cos(Pi*r)*I)
        """
        if self == 0:
            if expt.is_positive:
                return S.Zero
            if expt.is_negative:
                return S.Infinity
        if isinstance(expt, Number):
            if isinstance(expt, Integer):
                prec = self._prec
                return Float._new(
                    mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
            elif isinstance(expt, Rational) and \
                    expt.p == 1 and expt.q % 2 and self.is_negative:
                # odd root of a negative base: factor out (-1)**expt
                return Pow(S.NegativeOne, expt, evaluate=False)*(
                    -self)._eval_power(expt)
            expt, prec = expt._as_mpf_op(self._prec)
            mpfself = self._mpf_
            try:
                y = mpf_pow(mpfself, expt, prec, rnd)
                return Float._new(y, prec)
            except mlib.ComplexResult:
                # negative base with non-integer exponent: complex result
                re, im = mlib.mpc_pow(
                    (mpfself, fzero), (expt, fzero), prec, rnd)
                return Float._new(re, prec) + \
                    Float._new(im, prec)*S.ImaginaryUnit

    def __abs__(self):
        return Float._new(mlib.mpf_abs(self._mpf_), self._prec)

    def __int__(self):
        if self._mpf_ == fzero:
            return 0
        return int(mlib.to_int(self._mpf_))  # uses round_fast = round_down

    def __eq__(self, other):
        from sympy.logic.boolalg import Boolean
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if not self:
            # Float(0) equals anything falsy (0, S.Zero, ...)
            return not other
        if isinstance(other, Boolean):
            return False
        if other.is_NumberSymbol:
            if other.is_irrational:
                return False
            return other.__eq__(self)
        if other.is_Float:
            # comparison is exact
            # so Float(.1, 3) != Float(.1, 33)
            return self._mpf_ == other._mpf_
        if other.is_Rational:
            return other.__eq__(self)
        if other.is_Number:
            # numbers should compare at the same precision;
            # all _as_mpf_val routines should be sure to abide
            # by the request to change the prec if necessary; if
            # they don't, the equality test will fail since it compares
            # the mpf tuples
            ompf = other._as_mpf_val(self._prec)
            return bool(mlib.mpf_eq(self._mpf_, ompf))
        return False    # Float != non-Number

    def __ne__(self, other):
        return not self == other

    def _Frel(self, other, op):
        """Apply the mpf relation ``op`` between self and other, or None if undecidable."""
        from sympy.core.numbers import prec_to_dps
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if other.is_Rational:
            # test self*other.q <?> other.p without losing precision
            '''
            >>> f = Float(.1,2)
            >>> i = 1234567890
            >>> (f*i)._mpf_
            (0, 471, 18, 9)
            >>> mlib.mpf_mul(f._mpf_, mlib.from_int(i))
            (0, 505555550955, -12, 39)
            '''
            smpf = mlib.mpf_mul(self._mpf_, mlib.from_int(other.q))
            ompf = mlib.from_int(other.p)
            return _sympify(bool(op(smpf, ompf)))
        elif other.is_Float:
            return _sympify(bool(
                op(self._mpf_, other._mpf_)))
        elif other.is_comparable and other not in (
                S.Infinity, S.NegativeInfinity):
            other = other.evalf(prec_to_dps(self._prec))
            if other._prec > 1:
                if other.is_Number:
                    return _sympify(bool(
                        op(self._mpf_, other._as_mpf_val(self._prec))))

    def __gt__(self, other):
        if isinstance(other, NumberSymbol):
            return other.__lt__(self)
        rv = self._Frel(other, mlib.mpf_gt)
        if rv is None:
            return Expr.__gt__(self, other)
        return rv

    def __ge__(self, other):
        if isinstance(other, NumberSymbol):
            return other.__le__(self)
        rv = self._Frel(other, mlib.mpf_ge)
        if rv is None:
            return Expr.__ge__(self, other)
        return rv

    def __lt__(self, other):
        if isinstance(other, NumberSymbol):
            return other.__gt__(self)
        rv = self._Frel(other, mlib.mpf_lt)
        if rv is None:
            return Expr.__lt__(self, other)
        return rv

    def __le__(self, other):
        if isinstance(other, NumberSymbol):
            return other.__ge__(self)
        rv = self._Frel(other, mlib.mpf_le)
        if rv is None:
            return Expr.__le__(self, other)
        return rv

    def __hash__(self):
        return super().__hash__()

    def epsilon_eq(self, other, epsilon="1e-15"):
        """Return True if |self - other| < epsilon."""
        return abs(self - other) < Float(epsilon)

    def _sage_(self):
        # Sage interoperability hook.
        import sage.all as sage
        return sage.RealNumber(str(self))

    def __format__(self, format_spec):
        # format() support via Decimal's formatting of the decimal string.
        return format(decimal.Decimal(str(self)), format_spec)
# Register Float as the sympify converter for Python floats and Decimals
# so sympify(0.5) and sympify(Decimal('0.5')) produce a Float.
converter[float] = converter[decimal.Decimal] = Float

# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or convert the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympy.core.sympify.sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ('p', 'q')
is_Rational = True
@cacheit
def __new__(cls, p, q=None, gcd=None):
    """Build a Rational p/q in lowest terms.

    With one argument, accepts Rational, int, float/Float (exact binary
    value), or a string such as '3/4' or '1e-2/3.2'.  ``gcd`` may be
    supplied by callers that already know gcd(p, q) to skip its
    computation.  Degenerate denominators give nan (0/0) or zoo (p/0);
    results reduce to Integer or S.Half when possible.
    """
    if q is None:
        if isinstance(p, Rational):
            return p

        if isinstance(p, SYMPY_INTS):
            pass
        else:
            if isinstance(p, (float, Float)):
                # use the exact binary value of the float
                return Rational(*_as_integer_ratio(p))

            if not isinstance(p, str):
                try:
                    p = sympify(p)
                except (SympifyError, SyntaxError):
                    pass  # error will raise below
            else:
                if p.count('/') > 1:
                    raise TypeError('invalid input: %s' % p)
                p = p.replace(' ', '')
                pq = p.rsplit('/', 1)
                if len(pq) == 2:
                    # 'a/b' string: divide the two Fraction halves
                    p, q = pq
                    fp = fractions.Fraction(p)
                    fq = fractions.Fraction(q)
                    p = fp/fq
                try:
                    p = fractions.Fraction(p)
                except ValueError:
                    pass  # error will raise below
                else:
                    return Rational(p.numerator, p.denominator, 1)

            if not isinstance(p, Rational):
                raise TypeError('invalid input: %s' % p)

        q = 1
        gcd = 1
    else:
        p = Rational(p)
        q = Rational(q)

    # fold nested Rationals into plain integers p, q
    if isinstance(q, Rational):
        p *= q.q
        q = q.p
    if isinstance(p, Rational):
        q *= p.q
        p = p.p

    # p and q are now integers
    if q == 0:
        if p == 0:
            if _errdict["divide"]:
                raise ValueError("Indeterminate 0/0")
            else:
                return S.NaN
        return S.ComplexInfinity
    if q < 0:
        # canonical form keeps the denominator positive
        q = -q
        p = -p
    if not gcd:
        gcd = igcd(abs(p), q)
    if gcd > 1:
        p //= gcd
        q //= gcd
    if q == 1:
        return Integer(p)
    if p == 1 and q == 2:
        return S.Half
    obj = Expr.__new__(cls)
    obj.p = p
    obj.q = q
    return obj
def limit_denominator(self, max_denominator=1000000):
    """Closest Rational to self with denominator at most max_denominator.

    Examples
    ========

    >>> from sympy import Rational
    >>> Rational('3.141592653589793').limit_denominator(10)
    22/7
    >>> Rational('3.141592653589793').limit_denominator(100)
    311/99

    """
    # Delegate the approximation to the stdlib Fraction implementation.
    approx = fractions.Fraction(self.p, self.q).limit_denominator(
        fractions.Fraction(int(max_denominator)))
    return Rational(approx)
def __getnewargs__(self):
    # Pickle support: rebuild via Rational(p, q).
    return (self.p, self.q)
def _hashable_content(self):
    # numerator/denominator pair fully determines a reduced Rational
    return (self.p, self.q)
def _eval_is_positive(self):
    # q is always positive in canonical form, so the sign is p's sign.
    return self.p > 0
def _eval_is_zero(self):
    return self.p == 0
def __neg__(self):
    return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
    """Add with exact fast paths for Integer/Rational; Float converts self."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            # gcd of the result is 1 since self is already reduced
            return Rational(self.p + self.q*other.p, self.q, 1)
        elif isinstance(other, Rational):
            #TODO: this can probably be optimized more
            return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
        elif isinstance(other, Float):
            return other + self
        else:
            return Number.__add__(self, other)
    return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
    """Subtract with exact fast paths for Integer/Rational; Float converts self."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            return Rational(self.p - self.q*other.p, self.q, 1)
        elif isinstance(other, Rational):
            return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
        elif isinstance(other, Float):
            return -other + self
        else:
            return Number.__sub__(self, other)
    return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
    """Reflected subtraction: other - self with the same fast paths."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            return Rational(self.q*other.p - self.p, self.q, 1)
        elif isinstance(other, Rational):
            return Rational(self.q*other.p - self.p*other.q, self.q*other.q)
        elif isinstance(other, Float):
            return -self + other
        else:
            return Number.__rsub__(self, other)
    return Number.__rsub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
    """Multiply; cross-gcds are passed along to skip re-reduction."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            return Rational(self.p*other.p, self.q, igcd(other.p, self.q))
        elif isinstance(other, Rational):
            return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p))
        elif isinstance(other, Float):
            return other*self
        else:
            return Number.__mul__(self, other)
    return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
    """Divide; nonzero/0 gives zoo, cross-gcds skip re-reduction."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            if self.p and other.p == S.Zero:
                return S.ComplexInfinity
            else:
                return Rational(self.p, self.q*other.p, igcd(self.p, other.p))
        elif isinstance(other, Rational):
            return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q))
        elif isinstance(other, Float):
            return self*(1/other)
        else:
            return Number.__truediv__(self, other)
    return Number.__truediv__(self, other)
@_sympifyit('other', NotImplemented)
def __rtruediv__(self, other):
    """Reflected division: other / self with the same fast paths."""
    if global_parameters.evaluate:
        if isinstance(other, Integer):
            return Rational(other.p*self.q, self.p, igcd(self.p, other.p))
        elif isinstance(other, Rational):
            return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q))
        elif isinstance(other, Float):
            return other*(1/self)
        else:
            return Number.__rtruediv__(self, other)
    return Number.__rtruediv__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
    """Exact mod for Rational divisors; Float divisors round afterwards."""
    if global_parameters.evaluate:
        if isinstance(other, Rational):
            # n = floor(self/other); remainder keeps other's sign
            n = (self.p*other.q) // (other.p*self.q)
            return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
        if isinstance(other, Float):
            # calculate mod with Rationals, *then* round the answer
            return Float(self.__mod__(Rational(other)),
                precision=other._prec)
        return Number.__mod__(self, other)
    return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
    """Reflected mod: other % self for Rational other."""
    if isinstance(other, Rational):
        return Rational.__mod__(other, self)
    return Number.__rmod__(self, other)
def _eval_power(self, expt):
    """Evaluate self**expt for numeric exponents; None if undecided.

    Handles Float exponents via evalf, negative exponents by inverting,
    oo by magnitude comparison, Integer exponents exactly, and Rational
    exponents by splitting numerator and denominator powers.
    """
    if isinstance(expt, Number):
        if isinstance(expt, Float):
            # evaluate the base at the exponent's precision first
            return self._eval_evalf(expt._prec)**expt
        if expt.is_extended_negative:
            # (3/4)**-2 -> (4/3)**2
            ne = -expt
            if (ne is S.One):
                return Rational(self.q, self.p)
            if self.is_negative:
                return S.NegativeOne**expt*Rational(self.q, -self.p)**ne
            else:
                return Rational(self.q, self.p)**ne
        if expt is S.Infinity:  # -oo already caught by test for negative
            if self.p > self.q:
                # (3/2)**oo -> oo
                return S.Infinity
            if self.p < -self.q:
                # (-3/2)**oo -> oo + I*oo
                return S.Infinity + S.Infinity*S.ImaginaryUnit
            # |self| <= 1: powers shrink to zero
            return S.Zero
        if isinstance(expt, Integer):
            # (4/3)**2 -> 4**2 / 3**2
            return Rational(self.p**expt.p, self.q**expt.p, 1)
        if isinstance(expt, Rational):
            if self.p != 1:
                # (4/3)**(5/6) -> 4**(5/6)*3**(-5/6)
                return Integer(self.p)**expt*Integer(self.q)**(-expt)
            # as the above caught negative self.p, now self is positive
            return Integer(self.q)**Rational(
                expt.p*(expt.q - 1), expt.q) / \
                Integer(self.q)**Integer(expt.p)

    if self.is_extended_negative and expt.is_even:
        return (-self)**expt

    return
def _as_mpf_val(self, prec):
    """Return the mpf tuple for p/q rounded to ``prec`` bits."""
    return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
    """mpmath interoperability hook: return an mpmath mpf for p/q."""
    return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
    # q is positive in canonical form, so only p needs abs()
    return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -int(-p//q)
return int(p//q)
def floor(self):
    """Largest Integer <= self (integer floor division)."""
    return Integer(self.p // self.q)
def ceiling(self):
    """Smallest Integer >= self, via ceil(x) == -floor(-x)."""
    return -Integer(-self.p // self.q)
def __floor__(self):
    """Support math.floor() by delegating to .floor()."""
    return self.floor()
def __ceil__(self):
    """Support math.ceil() by delegating to .ceiling()."""
    return self.ceiling()
def __eq__(self, other):
    """Exact equality against Numbers; Floats are compared via their mpf tuple."""
    from sympy.core.power import integer_log
    try:
        other = _sympify(other)
    except SympifyError:
        return NotImplemented
    if not isinstance(other, Number):
        # S(0) == S.false is False
        # S(0) == False is True
        return False
    if not self:
        return not other
    if other.is_NumberSymbol:
        if other.is_irrational:
            return False
        return other.__eq__(self)
    if other.is_Rational:
        # a Rational is always in reduced form so will never be 2/4
        # so we can just check equivalence of args
        return self.p == other.p and self.q == other.q
    if other.is_Float:
        # all Floats have a denominator that is a power of 2
        # so if self doesn't, it can't be equal to other
        if self.q & (self.q - 1):
            return False
        # other == (-1)**s * m * 2**t  from its mpf tuple
        s, m, t = other._mpf_[:3]
        if s:
            m = -m
        if not t:
            # other is an odd integer
            if not self.is_Integer or self.is_even:
                return False
            return m == self.p
        if t > 0:
            # other is an even integer
            if not self.is_Integer:
                return False
            # does m*2**t == self.p
            return self.p and not self.p % m and \
                integer_log(self.p//m, 2) == (t, True)
        # does non-integer s*m/2**-t = p/q?
        if self.is_Integer:
            return False
        return m == self.p and integer_log(self.q, 2) == (-t, True)
    return False
def __ne__(self, other):
    return not self == other
def _Rrel(self, other, attr):
    """Helper for the rich comparisons.

    Returns the reflected comparison result when it can be computed,
    a 2-tuple of cross-multiplied operands for Expr to compare, or
    None when nothing can be decided here.
    """
    # if you want self < other, pass self, other, __gt__
    try:
        other = _sympify(other)
    except SympifyError:
        return NotImplemented
    if other.is_Number:
        op = None
        s, o = self, other
        if other.is_NumberSymbol:
            op = getattr(o, attr)
        elif other.is_Float:
            op = getattr(o, attr)
        elif other.is_Rational:
            # cross-multiply: p1/q1 <?> p2/q2  ->  p1*q2 <?> q1*p2
            s, o = Integer(s.p*o.q), Integer(s.q*o.p)
            op = getattr(o, attr)
        if op:
            return op(s)
        if o.is_number and o.is_extended_real:
            # clear the denominator so Expr compares integers
            return Integer(s.p), s.q*o
def __gt__(self, other):
rv = self._Rrel(other, '__lt__')
if rv is None:
rv = self, other
elif not type(rv) is tuple:
return rv
return Expr.__gt__(*rv)
def __ge__(self, other):
rv = self._Rrel(other, '__le__')
if rv is None:
rv = self, other
elif not type(rv) is tuple:
return rv
return Expr.__ge__(*rv)
def __lt__(self, other):
rv = self._Rrel(other, '__gt__')
if rv is None:
rv = self, other
elif not type(rv) is tuple:
return rv
return Expr.__lt__(*rv)
def __le__(self, other):
rv = self._Rrel(other, '__ge__')
if rv is None:
rv = self, other
elif not type(rv) is tuple:
return rv
return Expr.__le__(*rv)
    def __hash__(self):
        # __eq__ is overridden above, so __hash__ must be restated
        # explicitly; delegate to the parent implementation.
        return super().__hash__()
    def factors(self, limit=None, use_trial=True, use_rho=False,
                use_pm1=False, verbose=False, visual=False):
        """A wrapper to factorint which return factors of self that are
        smaller than limit (or cheap to compute). Special methods of
        factoring are disabled by default so that only trial division is used.
        """
        from sympy.ntheory import factorrat
        # factorrat factors numerator and denominator together; a copy
        # is returned so the caller may mutate the dict freely.
        return factorrat(self, limit=limit, use_trial=use_trial,
                         use_rho=use_rho, use_pm1=use_pm1,
                         verbose=verbose).copy()
    def numerator(self):
        """Return the numerator ``self.p``."""
        return self.p
    def denominator(self):
        """Return the denominator ``self.q``."""
        return self.q
    @_sympifyit('other', NotImplemented)
    def gcd(self, other):
        """Greatest common divisor.

        For two Rationals this is gcd(p1, p2)/lcm(q1, q2); other Number
        types are deferred to ``Number.gcd``.
        """
        if isinstance(other, Rational):
            if other == S.Zero:
                return other
            return Rational(
                Integer(igcd(self.p, other.p)),
                Integer(ilcm(self.q, other.q)))
        return Number.gcd(self, other)
    @_sympifyit('other', NotImplemented)
    def lcm(self, other):
        """Least common multiple.

        For two Rationals this is lcm(p1, p2)/gcd(q1, q2); other Number
        types are deferred to ``Number.lcm``.
        """
        if isinstance(other, Rational):
            return Rational(
                self.p // igcd(self.p, other.p) * other.p,
                igcd(self.q, other.q))
        return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
    def _sage_(self):
        # Conversion hook used by SageMath's coercion framework.
        import sage.all as sage
        return sage.Integer(self.p)/sage.Integer(self.q)
    def as_content_primitive(self, radical=False, clear=True):
        """Return the tuple (R, self/R) where R is the positive Rational
        extracted from self.
        Examples
        ========
        >>> from sympy import S
        >>> (S(-3)/2).as_content_primitive()
        (3/2, -1)
        See docstring of Expr.as_content_primitive for more examples.
        """
        if self:
            if self.is_positive:
                return self, S.One
            return -self, S.NegativeOne
        # self is zero: content is 1, primitive part is 0
        return S.One, self
    def as_coeff_Mul(self, rational=False):
        """Efficiently extract the coefficient of a product. """
        # A pure Rational is entirely coefficient; the remaining factor is 1.
        return self, S.One
    def as_coeff_Add(self, rational=False):
        """Efficiently extract the coefficient of a summation. """
        # A pure Rational is entirely coefficient; the remaining term is 0.
        return self, S.Zero
class Integer(Rational):
    """Represents integer numbers of any size.
    Examples
    ========
    >>> from sympy import Integer
    >>> Integer(3)
    3
    If a float or a rational is passed to Integer, the fractional part
    will be discarded; the effect is of rounding toward zero.
    >>> Integer(3.8)
    3
    >>> Integer(-3.8)
    -3
    A string is acceptable input if it can be parsed as an integer:
    >>> Integer("9" * 20)
    99999999999999999999
    It is rarely needed to explicitly instantiate an Integer, because
    Python integers are automatically converted to Integer when they
    are used in SymPy expressions.
    """
    q = 1  # the denominator of any Integer is 1
    is_integer = True
    is_number = True
    is_Integer = True
    __slots__ = ('p',)
    def _as_mpf_val(self, prec):
        # mpmath mpf value at binary precision ``prec``
        return mlib.from_int(self.p, prec, rnd)
    def _mpmath_(self, prec, rnd):
        return mpmath.make_mpf(self._as_mpf_val(prec))
    @cacheit
    def __new__(cls, i):
        if isinstance(i, str):
            i = i.replace(' ', '')
        # whereas we cannot, in general, make a Rational from an
        # arbitrary expression, we can make an Integer unambiguously
        # (except when a non-integer expression happens to round to
        # an integer). So we proceed by taking int() of the input and
        # let the int routines determine whether the expression can
        # be made into an int or whether an error should be raised.
        try:
            ival = int(i)
        except TypeError:
            raise TypeError(
                "Argument of Integer should be of numeric type, got %s." % i)
        # We only work with well-behaved integer types. This converts, for
        # example, numpy.int32 instances.
        if ival == 1:
            return S.One
        if ival == -1:
            return S.NegativeOne
        if ival == 0:
            return S.Zero
        obj = Expr.__new__(cls)
        obj.p = ival
        return obj
    def __getnewargs__(self):
        # support for pickling (reconstruct from the raw int)
        return (self.p,)
    # Arithmetic operations are here for efficiency
    def __int__(self):
        return self.p
    def floor(self):
        # an Integer is its own floor
        return Integer(self.p)
    def ceiling(self):
        # an Integer is its own ceiling
        return Integer(self.p)
    def __floor__(self):
        return self.floor()
    def __ceil__(self):
        return self.ceiling()
    def __neg__(self):
        return Integer(-self.p)
    def __abs__(self):
        if self.p >= 0:
            return self
        else:
            return Integer(-self.p)
    def __divmod__(self, other):
        from .containers import Tuple
        if isinstance(other, Integer) and global_parameters.evaluate:
            return Tuple(*(divmod(self.p, other.p)))
        else:
            return Number.__divmod__(self, other)
    def __rdivmod__(self, other):
        from .containers import Tuple
        if isinstance(other, int) and global_parameters.evaluate:
            return Tuple(*(divmod(other, self.p)))
        else:
            try:
                other = Number(other)
            except TypeError:
                msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
                oname = type(other).__name__
                sname = type(self).__name__
                raise TypeError(msg % (oname, sname))
            return Number.__divmod__(other, self)
    # TODO make it decorator + bytecodehacks?
    def __add__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(self.p + other)
            elif isinstance(other, Integer):
                return Integer(self.p + other.p)
            elif isinstance(other, Rational):
                # third argument 1: the gcd is already known to be 1
                return Rational(self.p*other.q + other.p, other.q, 1)
            return Rational.__add__(self, other)
        else:
            return Add(self, other)
    def __radd__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(other + self.p)
            elif isinstance(other, Rational):
                return Rational(other.p + self.p*other.q, other.q, 1)
            return Rational.__radd__(self, other)
        return Rational.__radd__(self, other)
    def __sub__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(self.p - other)
            elif isinstance(other, Integer):
                return Integer(self.p - other.p)
            elif isinstance(other, Rational):
                return Rational(self.p*other.q - other.p, other.q, 1)
            return Rational.__sub__(self, other)
        return Rational.__sub__(self, other)
    def __rsub__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(other - self.p)
            elif isinstance(other, Rational):
                return Rational(other.p - self.p*other.q, other.q, 1)
            return Rational.__rsub__(self, other)
        return Rational.__rsub__(self, other)
    def __mul__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(self.p*other)
            elif isinstance(other, Integer):
                return Integer(self.p*other.p)
            elif isinstance(other, Rational):
                # third argument: known gcd, avoids re-reducing
                return Rational(self.p*other.p, other.q, igcd(self.p, other.q))
            return Rational.__mul__(self, other)
        return Rational.__mul__(self, other)
    def __rmul__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(other*self.p)
            elif isinstance(other, Rational):
                return Rational(other.p*self.p, other.q, igcd(self.p, other.q))
            return Rational.__rmul__(self, other)
        return Rational.__rmul__(self, other)
    def __mod__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(self.p % other)
            elif isinstance(other, Integer):
                return Integer(self.p % other.p)
            return Rational.__mod__(self, other)
        return Rational.__mod__(self, other)
    def __rmod__(self, other):
        if global_parameters.evaluate:
            if isinstance(other, int):
                return Integer(other % self.p)
            elif isinstance(other, Integer):
                return Integer(other.p % self.p)
            return Rational.__rmod__(self, other)
        return Rational.__rmod__(self, other)
    def __eq__(self, other):
        if isinstance(other, int):
            return (self.p == other)
        elif isinstance(other, Integer):
            return (self.p == other.p)
        return Rational.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    def __gt__(self, other):
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if other.is_Integer:
            return _sympify(self.p > other.p)
        return Rational.__gt__(self, other)
    def __lt__(self, other):
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if other.is_Integer:
            return _sympify(self.p < other.p)
        return Rational.__lt__(self, other)
    def __ge__(self, other):
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if other.is_Integer:
            return _sympify(self.p >= other.p)
        return Rational.__ge__(self, other)
    def __le__(self, other):
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if other.is_Integer:
            return _sympify(self.p <= other.p)
        return Rational.__le__(self, other)
    def __hash__(self):
        # hash like the underlying int so Integer(3) and 3 hash equal
        return hash(self.p)
    def __index__(self):
        # allows use in slicing, bin(), range(), etc.
        return self.p
    ########################################
    def _eval_is_odd(self):
        return bool(self.p % 2)
    def _eval_power(self, expt):
        """
        Tries to do some simplifications on self**expt
        Returns None if no further simplifications can be done.
        Explanation
        ===========
        When exponent is a fraction (so we have for example a square root),
        we try to find a simpler representation by factoring the argument
        up to factors of 2**15, e.g.
        - sqrt(4) becomes 2
        - sqrt(-4) becomes 2*I
        - (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
        Further simplification would require a special call to factorint on
        the argument which is not done here for sake of speed.
        """
        from sympy.ntheory.factor_ import perfect_power
        if expt is S.Infinity:
            if self.p > S.One:
                return S.Infinity
            # cases -1, 0, 1 are done in their respective classes
            return S.Infinity + S.ImaginaryUnit*S.Infinity
        if expt is S.NegativeInfinity:
            return Rational(1, self)**S.Infinity
        if not isinstance(expt, Number):
            # simplify when expt is even
            # (-2)**k --> 2**k
            if self.is_negative and expt.is_even:
                return (-self)**expt
        if isinstance(expt, Float):
            # Rational knows how to exponentiate by a Float
            return super()._eval_power(expt)
        if not isinstance(expt, Rational):
            return
        if expt is S.Half and self.is_negative:
            # we extract I for this special case since everyone is doing so
            return S.ImaginaryUnit*Pow(-self, expt)
        if expt.is_negative:
            # invert base and change sign on exponent
            ne = -expt
            if self.is_negative:
                return S.NegativeOne**expt*Rational(1, -self)**ne
            else:
                return Rational(1, self.p)**ne
        # see if base is a perfect root, sqrt(4) --> 2
        x, xexact = integer_nthroot(abs(self.p), expt.q)
        if xexact:
            # if it's a perfect root we've finished
            result = Integer(x**abs(expt.p))
            if self.is_negative:
                result *= S.NegativeOne**expt
            return result
        # The following is an algorithm where we collect perfect roots
        # from the factors of base.
        # if it's not an nth root, it still might be a perfect power
        b_pos = int(abs(self.p))
        p = perfect_power(b_pos)
        if p is not False:
            # NOTE(review): 'dict' shadows the builtin; kept as-is.
            dict = {p[0]: p[1]}
        else:
            dict = Integer(b_pos).factors(limit=2**15)
        # now process the dict of factors
        out_int = 1  # integer part
        out_rad = 1  # extracted radicals
        sqr_int = 1
        sqr_gcd = 0
        sqr_dict = {}
        for prime, exponent in dict.items():
            exponent *= expt.p
            # remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
            div_e, div_m = divmod(exponent, expt.q)
            if div_e > 0:
                out_int *= prime**div_e
            if div_m > 0:
                # see if the reduced exponent shares a gcd with e.q
                # (2**2)**(1/10) -> 2**(1/5)
                g = igcd(div_m, expt.q)
                if g != 1:
                    out_rad *= Pow(prime, Rational(div_m//g, expt.q//g))
                else:
                    sqr_dict[prime] = div_m
        # identify gcd of remaining powers
        for p, ex in sqr_dict.items():
            if sqr_gcd == 0:
                sqr_gcd = ex
            else:
                sqr_gcd = igcd(sqr_gcd, ex)
                if sqr_gcd == 1:
                    break
        for k, v in sqr_dict.items():
            sqr_int *= k**(v//sqr_gcd)
        if sqr_int == b_pos and out_int == 1 and out_rad == 1:
            # nothing was extracted; signal "no simplification"
            result = None
        else:
            result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
            if self.is_negative:
                result *= Pow(S.NegativeOne, expt)
        return result
    def _eval_is_prime(self):
        from sympy.ntheory import isprime
        return isprime(self)
    def _eval_is_composite(self):
        if self > 1:
            return fuzzy_not(self.is_prime)
        else:
            return False
    def as_numer_denom(self):
        return self, S.One
    @_sympifyit('other', NotImplemented)
    def __floordiv__(self, other):
        if not isinstance(other, Expr):
            return NotImplemented
        if isinstance(other, Integer):
            # int // Integer dispatches back through Integer.__rfloordiv__
            return Integer(self.p // other)
        return Integer(divmod(self, other)[0])
    def __rfloordiv__(self, other):
        return Integer(Integer(other).p // self.p)
# Add sympify converters: plain Python ints sympify to Integer
converter[int] = Integer
class AlgebraicNumber(Expr):
    """Class for representing algebraic numbers in SymPy. """
    # rep     -- DMP polynomial in the primitive element (the "coordinates")
    # root    -- the generating element itself
    # alias   -- optional Symbol standing for the primitive element
    # minpoly -- minimal polynomial of root over the rationals
    __slots__ = ('rep', 'root', 'alias', 'minpoly')
    is_AlgebraicNumber = True
    is_algebraic = True
    is_number = True
    def __new__(cls, expr, coeffs=None, alias=None, **args):
        """Construct a new algebraic number. """
        from sympy import Poly
        from sympy.polys.polyclasses import ANP, DMP
        from sympy.polys.numberfields import minimal_polynomial
        from sympy.core.symbol import Symbol
        expr = sympify(expr)
        # expr may be (minpoly, root), another AlgebraicNumber, or a
        # plain expression whose minimal polynomial is computed here.
        if isinstance(expr, (tuple, Tuple)):
            minpoly, root = expr
            if not minpoly.is_Poly:
                minpoly = Poly(minpoly)
        elif expr.is_AlgebraicNumber:
            minpoly, root = expr.minpoly, expr.root
        else:
            minpoly, root = minimal_polynomial(
                expr, args.get('gen'), polys=True), expr
        dom = minpoly.get_domain()
        if coeffs is not None:
            if not isinstance(coeffs, ANP):
                rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
                scoeffs = Tuple(*coeffs)
            else:
                rep = DMP.from_list(coeffs.to_list(), 0, dom)
                scoeffs = Tuple(*coeffs.to_list())
            # reduce modulo the minimal polynomial so rep stays canonical
            if rep.degree() >= minpoly.degree():
                rep = rep.rem(minpoly.rep)
        else:
            # default coordinates: the root itself, i.e. 1*x + 0
            rep = DMP.from_list([1, 0], 0, dom)
            scoeffs = Tuple(1, 0)
        sargs = (root, scoeffs)
        if alias is not None:
            if not isinstance(alias, Symbol):
                alias = Symbol(alias)
            sargs = sargs + (alias,)
        obj = Expr.__new__(cls, *sargs)
        obj.rep = rep
        obj.root = root
        obj.alias = alias
        obj.minpoly = minpoly
        return obj
    def __hash__(self):
        return super().__hash__()
    def _eval_evalf(self, prec):
        return self.as_expr()._evalf(prec)
    @property
    def is_aliased(self):
        """Returns ``True`` if ``alias`` was set. """
        return self.alias is not None
    def as_poly(self, x=None):
        """Create a Poly instance from ``self``. """
        from sympy import Dummy, Poly, PurePoly
        if x is not None:
            return Poly.new(self.rep, x)
        else:
            if self.alias is not None:
                return Poly.new(self.rep, self.alias)
            else:
                return PurePoly.new(self.rep, Dummy('x'))
    def as_expr(self, x=None):
        """Create a Basic expression from ``self``. """
        return self.as_poly(x or self.root).as_expr().expand()
    def coeffs(self):
        """Returns all SymPy coefficients of an algebraic number. """
        return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
    def native_coeffs(self):
        """Returns all native coefficients of an algebraic number. """
        return self.rep.all_coeffs()
    def to_algebraic_integer(self):
        """Convert ``self`` to an algebraic integer. """
        from sympy import Poly
        f = self.minpoly
        if f.LC() == 1:
            # already monic, hence already an algebraic integer
            return self
        # scale the root by LC(f); the composed polynomial is monic
        coeff = f.LC()**(f.degree() - 1)
        poly = f.compose(Poly(f.gen/f.LC()))
        minpoly = poly*coeff
        root = f.LC()*self.root
        return AlgebraicNumber((minpoly, root), self.coeffs())
    def _eval_simplify(self, **kwargs):
        from sympy.polys import CRootOf, minpoly
        measure, ratio = kwargs['measure'], kwargs['ratio']
        for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]:
            if minpoly(self.root - r).is_Symbol:
                # use the matching root if it's simpler
                if measure(r) < ratio*measure(self.root):
                    return AlgebraicNumber(r)
        return self
class RationalConstant(Rational):
    """
    Abstract base class for rationals with specific behaviors
    Derived classes must define class attributes p and q and should probably all
    be singletons.
    """
    __slots__ = ()
    def __new__(cls):
        # No arguments: p and q are class attributes on the subclass.
        return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
    """Abstract base class for singleton Integers (Zero, One, NegativeOne)."""
    __slots__ = ()
    def __new__(cls):
        # No arguments: p is a class attribute on the subclass.
        return AtomicExpr.__new__(cls)
class Zero(IntegerConstant, metaclass=Singleton):
    """The number zero.
    Zero is a singleton, and can be accessed by ``S.Zero``
    Examples
    ========
    >>> from sympy import S, Integer
    >>> Integer(0) is S.Zero
    True
    >>> 1/S.Zero
    zoo
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Zero
    """
    p = 0
    q = 1
    is_positive = False
    is_negative = False
    is_zero = True
    is_number = True
    is_comparable = True
    __slots__ = ()
    def __getnewargs__(self):
        # singleton: pickling needs no arguments
        return ()
    @staticmethod
    def __abs__():
        return S.Zero
    @staticmethod
    def __neg__():
        return S.Zero
    def _eval_power(self, expt):
        # 0**positive -> 0, 0**negative -> zoo, 0**non-real -> nan
        if expt.is_positive:
            return self
        if expt.is_negative:
            return S.ComplexInfinity
        if expt.is_extended_real is False:
            return S.NaN
        # infinities are already handled with pos and neg
        # tests above; now throw away leading numbers on Mul
        # exponent
        coeff, terms = expt.as_coeff_Mul()
        if coeff.is_negative:
            return S.ComplexInfinity**terms
        if coeff is not S.One:  # there is a Number to discard
            return self**terms
    def _eval_order(self, *symbols):
        # Order(0,x) -> 0
        return self
    def __bool__(self):
        # S.Zero is falsy, like the int 0
        return False
    def as_coeff_Mul(self, rational=False):  # XXX this routine should be deleted
        """Efficiently extract the coefficient of a product. """
        return S.One, self
class One(IntegerConstant, metaclass=Singleton):
    """The number one.
    One is a singleton, and can be accessed by ``S.One``.
    Examples
    ========
    >>> from sympy import S, Integer
    >>> Integer(1) is S.One
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/1_%28number%29
    """
    is_number = True
    p = 1
    q = 1
    __slots__ = ()
    def __getnewargs__(self):
        # singleton: pickling needs no arguments
        return ()
    @staticmethod
    def __abs__():
        return S.One
    @staticmethod
    def __neg__():
        return S.NegativeOne
    def _eval_power(self, expt):
        # 1**anything == 1
        return self
    def _eval_order(self, *symbols):
        # 1 contributes no Order term
        return
    @staticmethod
    def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
                verbose=False, visual=False):
        # 1 has no prime factors; visual mode returns the expression 1
        if visual:
            return S.One
        else:
            return {}
class NegativeOne(IntegerConstant, metaclass=Singleton):
    """The number negative one.
    NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``.
    Examples
    ========
    >>> from sympy import S, Integer
    >>> Integer(-1) is S.NegativeOne
    True
    See Also
    ========
    One
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/%E2%88%921_%28number%29
    """
    is_number = True
    p = -1
    q = 1
    __slots__ = ()
    def __getnewargs__(self):
        # singleton: pickling needs no arguments
        return ()
    @staticmethod
    def __abs__():
        return S.One
    @staticmethod
    def __neg__():
        return S.One
    def _eval_power(self, expt):
        # (-1)**odd -> -1, (-1)**even -> 1; rational exponents may
        # extract a power of I via the q == 2 case below.
        if expt.is_odd:
            return S.NegativeOne
        if expt.is_even:
            return S.One
        if isinstance(expt, Number):
            if isinstance(expt, Float):
                return Float(-1.0)**expt
            if expt is S.NaN:
                return S.NaN
            if expt is S.Infinity or expt is S.NegativeInfinity:
                return S.NaN
            if expt is S.Half:
                return S.ImaginaryUnit
            if isinstance(expt, Rational):
                if expt.q == 2:
                    return S.ImaginaryUnit**Integer(expt.p)
                i, r = divmod(expt.p, expt.q)
                if i:
                    # split off the integer part of the exponent
                    return self**i*self**Rational(r, expt.q)
        return
class Half(RationalConstant, metaclass=Singleton):
    """The rational number 1/2.
    Half is a singleton, and can be accessed by ``S.Half``.
    Examples
    ========
    >>> from sympy import S, Rational
    >>> Rational(1, 2) is S.Half
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/One_half
    """
    is_number = True
    p = 1
    q = 2
    __slots__ = ()
    def __getnewargs__(self):
        # singleton: pickling needs no arguments
        return ()
    @staticmethod
    def __abs__():
        return S.Half
class Infinity(Number, metaclass=Singleton):
    r"""Positive infinite quantity.
    Explanation
    ===========
    In real analysis the symbol `\infty` denotes an unbounded
    limit: `x\to\infty` means that `x` grows without bound.
    Infinity is often used not only to define a limit but as a value
    in the affinely extended real number system. Points labeled `+\infty`
    and `-\infty` can be added to the topological space of the real numbers,
    producing the two-point compactification of the real numbers. Adding
    algebraic properties to this gives us the extended real numbers.
    Infinity is a singleton, and can be accessed by ``S.Infinity``,
    or can be imported as ``oo``.
    Examples
    ========
    >>> from sympy import oo, exp, limit, Symbol
    >>> 1 + oo
    oo
    >>> 42/oo
    0
    >>> x = Symbol('x')
    >>> limit(exp(x), x, oo)
    oo
    See Also
    ========
    NegativeInfinity, NaN
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Infinity
    """
    is_commutative = True
    is_number = True
    is_complex = False
    is_extended_real = True
    is_infinite = True
    is_comparable = True
    is_extended_positive = True
    is_prime = False
    __slots__ = ()
    def __new__(cls):
        return AtomicExpr.__new__(cls)
    def _latex(self, printer):
        return r"\infty"
    def _eval_subs(self, old, new):
        # implicitly returns None (no substitution) when self != old
        if self == old:
            return new
    def _eval_evalf(self, prec=None):
        return Float('inf')
    def evalf(self, prec=None, **options):
        return self._eval_evalf(prec)
    @_sympifyit('other', NotImplemented)
    def __add__(self, other):
        # oo + (-oo) and oo + nan are indeterminate; anything else is oo
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.NegativeInfinity or other is S.NaN:
                return S.NaN
            return self
        return Number.__add__(self, other)
    __radd__ = __add__
    @_sympifyit('other', NotImplemented)
    def __sub__(self, other):
        # oo - oo and oo - nan are indeterminate
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.Infinity or other is S.NaN:
                return S.NaN
            return self
        return Number.__sub__(self, other)
    @_sympifyit('other', NotImplemented)
    def __rsub__(self, other):
        return (-self).__add__(other)
    @_sympifyit('other', NotImplemented)
    def __mul__(self, other):
        # oo * 0 and oo * nan are indeterminate; sign follows the factor
        if isinstance(other, Number) and global_parameters.evaluate:
            if other.is_zero or other is S.NaN:
                return S.NaN
            if other.is_extended_positive:
                return self
            return S.NegativeInfinity
        return Number.__mul__(self, other)
    __rmul__ = __mul__
    @_sympifyit('other', NotImplemented)
    def __truediv__(self, other):
        # oo / (+-oo) and oo / nan are indeterminate; sign follows divisor
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.Infinity or \
                other is S.NegativeInfinity or \
                    other is S.NaN:
                return S.NaN
            if other.is_extended_nonnegative:
                return self
            return S.NegativeInfinity
        return Number.__truediv__(self, other)
    def __abs__(self):
        return S.Infinity
    def __neg__(self):
        return S.NegativeInfinity
    def _eval_power(self, expt):
        """
        ``expt`` is symbolic object but not equal to 0 or 1.
        ================ ======= ==============================
        Expression       Result  Notes
        ================ ======= ==============================
        ``oo ** nan``    ``nan``
        ``oo ** -p``     ``0``   ``p`` is number, ``oo``
        ================ ======= ==============================
        See Also
        ========
        Pow
        NaN
        NegativeInfinity
        """
        from sympy.functions import re
        if expt.is_extended_positive:
            return S.Infinity
        if expt.is_extended_negative:
            return S.Zero
        if expt is S.NaN:
            return S.NaN
        if expt is S.ComplexInfinity:
            return S.NaN
        if expt.is_extended_real is False and expt.is_number:
            # complex exponent: behavior is governed by its real part
            expt_real = re(expt)
            if expt_real.is_positive:
                return S.ComplexInfinity
            if expt_real.is_negative:
                return S.Zero
            if expt_real.is_zero:
                return S.NaN
            return self**expt.evalf()
    def _as_mpf_val(self, prec):
        return mlib.finf
    def _sage_(self):
        import sage.all as sage
        return sage.oo
    def __hash__(self):
        return super().__hash__()
    def __eq__(self, other):
        # equal to itself and to the float infinity
        return other is S.Infinity or other == float('inf')
    def __ne__(self, other):
        return other is not S.Infinity and other != float('inf')
    __gt__ = Expr.__gt__
    __ge__ = Expr.__ge__
    __lt__ = Expr.__lt__
    __le__ = Expr.__le__
    @_sympifyit('other', NotImplemented)
    def __mod__(self, other):
        # modulo is undefined for infinite operands
        if not isinstance(other, Expr):
            return NotImplemented
        return S.NaN
    __rmod__ = __mod__
    def floor(self):
        return self
    def ceiling(self):
        return self
oo = S.Infinity  # convenience alias for positive infinity
class NegativeInfinity(Number, metaclass=Singleton):
    """Negative infinite quantity.
    NegativeInfinity is a singleton, and can be accessed
    by ``S.NegativeInfinity``.
    See Also
    ========
    Infinity
    """
    is_extended_real = True
    is_complex = False
    is_commutative = True
    is_infinite = True
    is_comparable = True
    is_extended_negative = True
    is_number = True
    is_prime = False
    __slots__ = ()
    def __new__(cls):
        return AtomicExpr.__new__(cls)
    def _latex(self, printer):
        return r"-\infty"
    def _eval_subs(self, old, new):
        # implicitly returns None (no substitution) when self != old
        if self == old:
            return new
    def _eval_evalf(self, prec=None):
        return Float('-inf')
    def evalf(self, prec=None, **options):
        return self._eval_evalf(prec)
    @_sympifyit('other', NotImplemented)
    def __add__(self, other):
        # (-oo) + oo and (-oo) + nan are indeterminate
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.Infinity or other is S.NaN:
                return S.NaN
            return self
        return Number.__add__(self, other)
    __radd__ = __add__
    @_sympifyit('other', NotImplemented)
    def __sub__(self, other):
        # (-oo) - (-oo) and (-oo) - nan are indeterminate
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.NegativeInfinity or other is S.NaN:
                return S.NaN
            return self
        return Number.__sub__(self, other)
    @_sympifyit('other', NotImplemented)
    def __rsub__(self, other):
        return (-self).__add__(other)
    @_sympifyit('other', NotImplemented)
    def __mul__(self, other):
        # (-oo) * 0 and (-oo) * nan are indeterminate; sign flips with factor
        if isinstance(other, Number) and global_parameters.evaluate:
            if other.is_zero or other is S.NaN:
                return S.NaN
            if other.is_extended_positive:
                return self
            return S.Infinity
        return Number.__mul__(self, other)
    __rmul__ = __mul__
    @_sympifyit('other', NotImplemented)
    def __truediv__(self, other):
        # (-oo) / (+-oo) and (-oo) / nan are indeterminate
        if isinstance(other, Number) and global_parameters.evaluate:
            if other is S.Infinity or \
                other is S.NegativeInfinity or \
                    other is S.NaN:
                return S.NaN
            if other.is_extended_nonnegative:
                return self
            return S.Infinity
        return Number.__truediv__(self, other)
    def __abs__(self):
        return S.Infinity
    def __neg__(self):
        return S.Infinity
    def _eval_power(self, expt):
        """
        ``expt`` is symbolic object but not equal to 0 or 1.
        ================ ======= ==============================
        Expression       Result  Notes
        ================ ======= ==============================
        ``(-oo) ** nan`` ``nan``
        ``(-oo) ** oo``  ``nan``
        ``(-oo) ** -oo`` ``nan``
        ``(-oo) ** e``   ``oo``  ``e`` is positive even integer
        ``(-oo) ** o``   ``-oo`` ``o`` is positive odd integer
        ================ ======= ==============================
        See Also
        ========
        Infinity
        Pow
        NaN
        """
        if expt.is_number:
            if expt is S.NaN or \
                expt is S.Infinity or \
                    expt is S.NegativeInfinity:
                return S.NaN
            if isinstance(expt, Integer) and expt.is_extended_positive:
                if expt.is_odd:
                    return S.NegativeInfinity
                else:
                    return S.Infinity
            # general case: factor as (-1)**expt * oo**expt
            return S.NegativeOne**expt*S.Infinity**expt
    def _as_mpf_val(self, prec):
        return mlib.fninf
    def _sage_(self):
        import sage.all as sage
        return -(sage.oo)
    def __hash__(self):
        return super().__hash__()
    def __eq__(self, other):
        # equal to itself and to the float negative infinity
        return other is S.NegativeInfinity or other == float('-inf')
    def __ne__(self, other):
        return other is not S.NegativeInfinity and other != float('-inf')
    __gt__ = Expr.__gt__
    __ge__ = Expr.__ge__
    __lt__ = Expr.__lt__
    __le__ = Expr.__le__
    @_sympifyit('other', NotImplemented)
    def __mod__(self, other):
        # modulo is undefined for infinite operands
        if not isinstance(other, Expr):
            return NotImplemented
        return S.NaN
    __rmod__ = __mod__
    def floor(self):
        return self
    def ceiling(self):
        return self
    def as_powers_dict(self):
        # -oo factors as (-1)**1 * oo**1
        return {S.NegativeOne: 1, S.Infinity: 1}
class NaN(Number, metaclass=Singleton):
    """
    Not a Number.
    Explanation
    ===========
    This serves as a place holder for numeric values that are indeterminate.
    Most operations on NaN, produce another NaN. Most indeterminate forms,
    such as ``0/0`` or ``oo - oo`` produce NaN. Two exceptions are ``0**0``
    and ``oo**0``, which all produce ``1`` (this is consistent with Python's
    float).
    NaN is loosely related to floating point nan, which is defined in the
    IEEE 754 floating point standard, and corresponds to the Python
    ``float('nan')``. Differences are noted below.
    NaN is mathematically not equal to anything else, even NaN itself. This
    explains the initially counter-intuitive results with ``Eq`` and ``==`` in
    the examples below.
    NaN is not comparable so inequalities raise a TypeError. This is in
    contrast with floating point nan where all inequalities are false.
    NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
    as ``nan``.
    Examples
    ========
    >>> from sympy import nan, S, oo, Eq
    >>> nan is S.NaN
    True
    >>> oo - oo
    nan
    >>> nan + 1
    nan
    >>> Eq(nan, nan)   # mathematical equality
    False
    >>> nan == nan     # structural equality
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/NaN
    """
    is_commutative = True
    is_extended_real = None
    is_real = None
    is_rational = None
    is_algebraic = None
    is_transcendental = None
    is_integer = None
    is_comparable = False
    is_finite = None
    is_zero = None
    is_prime = None
    is_positive = None
    is_negative = None
    is_number = True
    __slots__ = ()
    def __new__(cls):
        return AtomicExpr.__new__(cls)
    def _latex(self, printer):
        return r"\text{NaN}"
    def __neg__(self):
        return self
    @_sympifyit('other', NotImplemented)
    def __add__(self, other):
        # nan absorbs every arithmetic operation
        return self
    @_sympifyit('other', NotImplemented)
    def __sub__(self, other):
        return self
    @_sympifyit('other', NotImplemented)
    def __mul__(self, other):
        return self
    @_sympifyit('other', NotImplemented)
    def __truediv__(self, other):
        return self
    def floor(self):
        return self
    def ceiling(self):
        return self
    def _as_mpf_val(self, prec):
        return _mpf_nan
    def _sage_(self):
        import sage.all as sage
        return sage.NaN
    def __hash__(self):
        return super().__hash__()
    def __eq__(self, other):
        # NaN is structurally equal to another NaN
        return other is S.NaN
    def __ne__(self, other):
        return other is not S.NaN
    # Expr will _sympify and raise TypeError
    __gt__ = Expr.__gt__
    __ge__ = Expr.__ge__
    __lt__ = Expr.__lt__
    __le__ = Expr.__le__
nan = S.NaN  # convenience alias for the NaN singleton
@dispatch(NaN, Expr)  # type:ignore
def _eval_is_eq(a, b):  # noqa:F811
    # NaN is not mathematically equal to anything, including itself.
    return False
class ComplexInfinity(AtomicExpr, metaclass=Singleton):
    r"""Complex infinity.
    Explanation
    ===========
    In complex analysis the symbol `\tilde\infty`, called "complex
    infinity", represents a quantity with infinite magnitude, but
    undetermined complex phase.
    ComplexInfinity is a singleton, and can be accessed by
    ``S.ComplexInfinity``, or can be imported as ``zoo``.
    Examples
    ========
    >>> from sympy import zoo
    >>> zoo + 42
    zoo
    >>> 42/zoo
    0
    >>> zoo + zoo
    nan
    >>> zoo*zoo
    zoo
    See Also
    ========
    Infinity
    """
    is_commutative = True
    is_infinite = True
    is_number = True
    is_prime = False
    is_complex = False
    is_extended_real = False
    __slots__ = ()
    def __new__(cls):
        return AtomicExpr.__new__(cls)
    def _latex(self, printer):
        return r"\tilde{\infty}"
    @staticmethod
    def __abs__():
        return S.Infinity
    def floor(self):
        return self
    def ceiling(self):
        return self
    @staticmethod
    def __neg__():
        # phase is undetermined, so negation is a no-op
        return S.ComplexInfinity
    def _eval_power(self, expt):
        # zoo**zoo and zoo**0 are indeterminate; otherwise the sign of
        # the exponent decides between zoo and 0.
        if expt is S.ComplexInfinity:
            return S.NaN
        if isinstance(expt, Number):
            if expt.is_zero:
                return S.NaN
            else:
                if expt.is_positive:
                    return S.ComplexInfinity
                else:
                    return S.Zero
    def _sage_(self):
        import sage.all as sage
        return sage.UnsignedInfinityRing.gen()
zoo = S.ComplexInfinity  # convenience alias for complex infinity
class NumberSymbol(AtomicExpr):
    """Base class for named numeric constants such as E, Pi, GoldenRatio."""
    is_commutative = True
    is_finite = True
    is_number = True
    __slots__ = ()
    is_NumberSymbol = True
    def __new__(cls):
        return AtomicExpr.__new__(cls)
    def approximation(self, number_cls):
        """ Return an interval with number_cls endpoints
        that contains the value of NumberSymbol.
        If not implemented, then return None.
        """
    def _eval_evalf(self, prec):
        return Float._new(self._as_mpf_val(prec), prec)
    def __eq__(self, other):
        try:
            other = _sympify(other)
        except SympifyError:
            return NotImplemented
        if self is other:
            return True
        # NOTE(review): both remaining paths return False; the
        # is_Number/is_irrational test is kept for readability only.
        if other.is_Number and self.is_irrational:
            return False
        return False    # NumberSymbol != non-(Number|self)
    def __ne__(self, other):
        return not self == other
    def __le__(self, other):
        if self is other:
            return S.true
        return Expr.__le__(self, other)
    def __ge__(self, other):
        if self is other:
            return S.true
        return Expr.__ge__(self, other)
    def __int__(self):
        # subclass with appropriate return value
        raise NotImplementedError
    def __hash__(self):
        return super().__hash__()
class Exp1(NumberSymbol, metaclass=Singleton):
    r"""The `e` constant.
    Explanation
    ===========
    The transcendental number `e = 2.718281828\ldots` is the base of the
    natural logarithm and of the exponential function, `e = \exp(1)`.
    Sometimes called Euler's number or Napier's constant.
    Exp1 is a singleton, and can be accessed by ``S.Exp1``,
    or can be imported as ``E``.
    Examples
    ========
    >>> from sympy import exp, log, E
    >>> E is exp(1)
    True
    >>> log(E)
    1
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
    """
    is_real = True
    is_positive = True
    is_negative = False  # XXX Forces is_negative/is_nonnegative
    is_irrational = True
    is_number = True
    is_algebraic = False
    is_transcendental = True
    __slots__ = ()
    def _latex(self, printer):
        return r"e"
    @staticmethod
    def __abs__():
        return S.Exp1
    def __int__(self):
        # truncation toward zero: int(e) == 2
        return 2
    def _as_mpf_val(self, prec):
        return mpf_e(prec)
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (Integer(2), Integer(3))
        elif issubclass(number_cls, Rational):
            # no rational bracketing interval implemented; returns None
            pass
    def _eval_power(self, expt):
        # E**x is canonicalized to exp(x)
        from sympy import exp
        return exp(expt)
    def _eval_rewrite_as_sin(self, **kwargs):
        from sympy import sin
        I = S.ImaginaryUnit
        return sin(I + S.Pi/2) - I*sin(I)
    def _eval_rewrite_as_cos(self, **kwargs):
        from sympy import cos
        I = S.ImaginaryUnit
        return cos(I) + I*cos(I + S.Pi/2)
    def _sage_(self):
        import sage.all as sage
        return sage.e
E = S.Exp1
class Pi(NumberSymbol, metaclass=Singleton):
    r"""The `\pi` constant.
    Explanation
    ===========
    The transcendental number `\pi = 3.141592654\ldots` represents the ratio
    of a circle's circumference to its diameter, the area of the unit circle,
    the half-period of trigonometric functions, and many other things
    in mathematics.
    Pi is a singleton, and can be accessed by ``S.Pi``, or can
    be imported as ``pi``.
    Examples
    ========
    >>> from sympy import S, pi, oo, sin, exp, integrate, Symbol
    >>> S.Pi
    pi
    >>> pi > 3
    True
    >>> pi.is_irrational
    True
    >>> x = Symbol('x')
    >>> sin(x + 2*pi)
    sin(x)
    >>> integrate(exp(-x**2), (x, -oo, oo))
    sqrt(pi)
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Pi
    """
    is_real = True
    is_positive = True
    is_negative = False
    is_irrational = True
    is_number = True
    is_algebraic = False
    is_transcendental = True
    __slots__ = ()
    def _latex(self, printer):
        return r"\pi"
    @staticmethod
    def __abs__():
        return S.Pi
    def __int__(self):
        # int(pi) truncates 3.14159... toward zero.
        return 3
    def _as_mpf_val(self, prec):
        # mpmath computes pi directly at the given binary precision.
        return mpf_pi(prec)
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (Integer(3), Integer(4))
        elif issubclass(number_cls, Rational):
            # Archimedes' classical bounds: 223/71 < pi < 22/7.
            return (Rational(223, 71), Rational(22, 7))
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.pi
pi = S.Pi
class GoldenRatio(NumberSymbol, metaclass=Singleton):
    r"""The golden ratio, `\phi`.
    Explanation
    ===========
    `\phi = \frac{1 + \sqrt{5}}{2}` is algebraic number.  Two quantities
    are in the golden ratio if their ratio is the same as the ratio of
    their sum to the larger of the two quantities, i.e. their maximum.
    GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``.
    Examples
    ========
    >>> from sympy import S
    >>> S.GoldenRatio > 1
    True
    >>> S.GoldenRatio.expand(func=True)
    1/2 + sqrt(5)/2
    >>> S.GoldenRatio.is_irrational
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Golden_ratio
    """
    is_real = True
    is_positive = True
    is_negative = False
    is_irrational = True
    is_number = True
    is_algebraic = True
    is_transcendental = False
    __slots__ = ()
    def _latex(self, printer):
        return r"\phi"
    def __int__(self):
        # int(phi) truncates 1.618... toward zero.
        return 1
    def _as_mpf_val(self, prec):
        # XXX track down why this has to be increased
        rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
        return mpf_norm(rv, prec)
    def _eval_expand_func(self, **hints):
        from sympy import sqrt
        # Closed form: (1 + sqrt(5)) / 2.
        return S.Half + S.Half*sqrt(5)
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (S.One, Rational(2))
        elif issubclass(number_cls, Rational):
            # No rational bounds provided; falls through to return None.
            pass
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.golden_ratio
    # The sqrt rewrite is exactly the expanded closed form above.
    _eval_rewrite_as_sqrt = _eval_expand_func
class TribonacciConstant(NumberSymbol, metaclass=Singleton):
    r"""The tribonacci constant.
    Explanation
    ===========
    The tribonacci numbers are like the Fibonacci numbers, but instead
    of starting with two predetermined terms, the sequence starts with
    three predetermined terms and each term afterwards is the sum of the
    preceding three terms.
    The tribonacci constant is the ratio toward which adjacent tribonacci
    numbers tend. It is a root of the polynomial `x^3 - x^2 - x - 1 = 0`,
    and also satisfies the equation `x + x^{-3} = 2`.
    TribonacciConstant is a singleton, and can be accessed
    by ``S.TribonacciConstant``.
    Examples
    ========
    >>> from sympy import S
    >>> S.TribonacciConstant > 1
    True
    >>> S.TribonacciConstant.expand(func=True)
    1/3 + (19 - 3*sqrt(33))**(1/3)/3 + (3*sqrt(33) + 19)**(1/3)/3
    >>> S.TribonacciConstant.is_irrational
    True
    >>> S.TribonacciConstant.n(20)
    1.8392867552141611326
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Generalizations_of_Fibonacci_numbers#Tribonacci_numbers
    """
    is_real = True
    is_positive = True
    is_negative = False
    is_irrational = True
    is_number = True
    is_algebraic = True
    is_transcendental = False
    __slots__ = ()
    def _latex(self, printer):
        return r"\text{TribonacciConstant}"
    def __int__(self):
        # int() truncates 1.839... toward zero -- NOTE(review): this
        # returns 2, which is a rounding rather than a truncation; compare
        # with the other constants here. Verify against upstream intent.
        return 2
    def _eval_evalf(self, prec):
        # Evaluate the radical closed form at a few guard digits, then
        # round to the requested precision.
        rv = self._eval_expand_func(function=True)._eval_evalf(prec + 4)
        return Float(rv, precision=prec)
    def _eval_expand_func(self, **hints):
        from sympy import sqrt, cbrt
        # Cardano-style closed form of the real root of x^3 - x^2 - x - 1.
        return (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (S.One, Rational(2))
        elif issubclass(number_cls, Rational):
            # No rational bounds provided; falls through to return None.
            pass
    # The sqrt rewrite is exactly the expanded closed form above.
    _eval_rewrite_as_sqrt = _eval_expand_func
class EulerGamma(NumberSymbol, metaclass=Singleton):
    r"""The Euler-Mascheroni constant.
    Explanation
    ===========
    `\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical
    constant recurring in analysis and number theory.  It is defined as the
    limiting difference between the harmonic series and the
    natural logarithm:
    .. math:: \gamma = \lim\limits_{n\to\infty}
              \left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right)
    EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``.
    Examples
    ========
    >>> from sympy import S
    >>> S.EulerGamma.is_irrational
    >>> S.EulerGamma > 0
    True
    >>> S.EulerGamma > 1
    False
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant
    """
    is_real = True
    is_positive = True
    is_negative = False
    is_irrational = None  # None: whether gamma is irrational is an open problem
    is_number = True
    __slots__ = ()
    def _latex(self, printer):
        return r"\gamma"
    def __int__(self):
        # int(gamma) truncates 0.577... toward zero.
        return 0
    def _as_mpf_val(self, prec):
        # XXX track down why this has to be increased
        v = mlib.libhyper.euler_fixed(prec + 10)
        rv = mlib.from_man_exp(v, -prec - 10)
        return mpf_norm(rv, prec)
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (S.Zero, S.One)
        elif issubclass(number_cls, Rational):
            # 1/2 < gamma < 3/5.
            return (S.Half, Rational(3, 5))
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.euler_gamma
class Catalan(NumberSymbol, metaclass=Singleton):
    r"""Catalan's constant.
    Explanation
    ===========
    `K = 0.91596559\ldots` is given by the infinite series
    .. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}
    Catalan is a singleton, and can be accessed by ``S.Catalan``.
    Examples
    ========
    >>> from sympy import S
    >>> S.Catalan.is_irrational
    >>> S.Catalan > 0
    True
    >>> S.Catalan > 1
    False
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Catalan%27s_constant
    """
    is_real = True
    is_positive = True
    is_negative = False
    is_irrational = None  # None: whether K is irrational is an open problem
    is_number = True
    __slots__ = ()
    def __int__(self):
        # int(K) truncates 0.915... toward zero.
        return 0
    def _as_mpf_val(self, prec):
        # XXX track down why this has to be increased
        v = mlib.catalan_fixed(prec + 10)
        rv = mlib.from_man_exp(v, -prec - 10)
        return mpf_norm(rv, prec)
    def approximation_interval(self, number_cls):
        if issubclass(number_cls, Integer):
            return (S.Zero, S.One)
        elif issubclass(number_cls, Rational):
            # 9/10 < K < 1.
            return (Rational(9, 10), S.One)
    def _eval_rewrite_as_Sum(self, k_sym=None, symbols=None):
        from sympy import Sum, Dummy
        # Only the no-argument form produces the Sum rewrite; when a dummy
        # or symbols are supplied the constant is returned unchanged.
        if (k_sym is not None) or (symbols is not None):
            return self
        k = Dummy('k', integer=True, nonnegative=True)
        return Sum((-1)**k / (2*k+1)**2, (k, 0, S.Infinity))
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.catalan
class ImaginaryUnit(AtomicExpr, metaclass=Singleton):
    r"""The imaginary unit, `i = \sqrt{-1}`.
    I is a singleton, and can be accessed by ``S.I``, or can be
    imported as ``I``.
    Examples
    ========
    >>> from sympy import I, sqrt
    >>> sqrt(-1)
    I
    >>> I*I
    -1
    >>> 1/I
    -I
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Imaginary_unit
    """
    is_commutative = True
    is_imaginary = True
    is_finite = True
    is_number = True
    is_algebraic = True
    is_transcendental = False
    __slots__ = ()
    def _latex(self, printer):
        return printer._settings['imaginary_unit_latex']
    @staticmethod
    def __abs__():
        # |I| == 1.
        return S.One
    def _eval_evalf(self, prec):
        # I is already exact; nothing to evaluate numerically.
        return self
    def _eval_conjugate(self):
        # conjugate(I) == -I.
        return -S.ImaginaryUnit
    def _eval_power(self, expt):
        """
        b is I = sqrt(-1)
        e is symbolic object but not equal to 0, 1
        I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> sin(Pi*r/2) + cos(Pi*r/2)*I, r is decimal
        I**0 mod 4 -> 1
        I**1 mod 4 -> I
        I**2 mod 4 -> -1
        I**3 mod 4 -> -I
        """
        if isinstance(expt, Number):
            if isinstance(expt, Integer):
                # Powers of I cycle with period 4.
                expt = expt.p % 4
                if expt == 0:
                    return S.One
                if expt == 1:
                    return S.ImaginaryUnit
                if expt == 2:
                    return -S.One
                return -S.ImaginaryUnit
        # Non-integer exponents: return None so the generic Pow machinery
        # handles them.
        return
    def as_base_exp(self):
        # I is canonically (-1)**(1/2).
        return S.NegativeOne, S.Half
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.I
    @property
    def _mpc_(self):
        # mpmath complex representation: 0 + 1j.
        return (Float(0)._mpf_, Float(1)._mpf_)
I = S.ImaginaryUnit
@dispatch(Tuple, Number)  # type:ignore
def _eval_is_eq(self, other): # noqa: F811
    # A Tuple is never equal to a plain Number.
    return False
# Converters from foreign numeric types into sympy objects, registered in
# the global `converter` table used by sympify().
def sympify_fractions(f):
    # NOTE(review): the trailing 1 is presumably a precomputed gcd telling
    # Rational the fraction is already reduced -- confirm against
    # Rational's constructor signature.
    return Rational(f.numerator, f.denominator, 1)
converter[fractions.Fraction] = sympify_fractions
if HAS_GMPY:
    def sympify_mpz(x):
        return Integer(int(x))
    # XXX: The sympify_mpq function here was never used because it is
    # overridden by the other sympify_mpq function below. Maybe it should just
    # be removed or maybe it should be used for something...
    # NOTE(review): in this file the mpmath converter below has a different
    # name (sympify_mpmath_mpq), so verify whether the override described
    # above still happens.
    def sympify_mpq(x):
        return Rational(int(x.numerator), int(x.denominator))
    converter[type(gmpy.mpz(1))] = sympify_mpz
    converter[type(gmpy.mpq(1, 2))] = sympify_mpq
def sympify_mpmath_mpq(x):
    p, q = x._mpq_
    return Rational(p, q, 1)
converter[type(mpmath.rational.mpq(1, 2))] = sympify_mpmath_mpq
def sympify_mpmath(x):
    # Convert any mpmath number at its context's working precision.
    return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
    # Split a Python complex into sympified real and imaginary parts.
    real, imag = list(map(sympify, (a.real, a.imag)))
    return real + S.ImaginaryUnit*imag
converter[complex] = sympify_complex
# These imports happen late, after the classes above are defined --
# presumably to avoid circular imports at module load time (confirm).
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
def _register_classes():
    # Register sympy's numeric types as virtual subclasses of the stdlib
    # `numbers` abstract base classes so isinstance checks interoperate.
    numbers.Number.register(Number)
    numbers.Real.register(Float)
    numbers.Rational.register(Rational)
    numbers.Rational.register(Integer)
_register_classes()
| 29.402067 | 108 | 0.561447 |
a439b8f61d38e52515082a5498ca97088d36971d | 1,013 | py | Python | python_fishc/14.0.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | 1 | 2020-11-29T14:42:01.000Z | 2020-11-29T14:42:01.000Z | python_fishc/14.0.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | null | null | null | python_fishc/14.0.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | null | null | null | '''0. 请写一个密码安全性检查的代码代码:check.py'''
# 密码安全性检查代码
#
# 低级密码要求:
# 1. 密码由单纯的数字或字母组成
# 2. 密码长度小于等于8位
#
# 中级密码要求:
# 1. 密码必须由数字、字母或特殊字符(仅限:~!@#$%^&*()_=-/,.?<>;:[]{}|\)任意两种组合
# 2. 密码长度不能低于8位
#
# 高级密码要求:
# 1. 密码必须由数字、字母及特殊字符(仅限:~!@#$%^&*()_=-/,.?<>;:[]{}|\)三种组合
# 2. 密码只能由字母开头
# 3. 密码长度不能低于16位
def check():
symbol = '~!@#$%^&*()_=-/,.?<>;:[]{}|\\'
test = input('请输入需要检查的密码组合:')
length = len(test)
flag = 0
notice = '''请按以下方式提升宁的密码安全级别:
1.密码必须由数字、字母及特殊字符三种组合
2.密码只能由字母开头
3.密码长度不能低于16位'''
print('宁的密码安全级别评定为:' , end ='')
for each in test:
if each in symbol:
flag = 1
break
if test.isalnum() or length <= 8:
print('低')
print(notice)
elif test[0].isalpha() and length >= 16 and flag == 1 :
print('高')
print('请继续保持')
return True
else:
print('中')
print(notice)
# Keep prompting until the user supplies a high-strength password.
while 1 :
    if check():
        break
| 19.862745 | 62 | 0.461007 |
7b71f8165676437ee4dc9b535508ee24df6b424a | 2,013 | py | Python | demos/kitchen_sink/libs/baseclass/download_file.py | p3g4asus/KivyMD | 441b3ef9daf412bd588bbb485612c5f2a53d669c | [
"MIT"
] | 1 | 2020-07-01T12:39:51.000Z | 2020-07-01T12:39:51.000Z | demos/kitchen_sink/libs/baseclass/download_file.py | p3g4asus/KivyMD | 441b3ef9daf412bd588bbb485612c5f2a53d669c | [
"MIT"
] | null | null | null | demos/kitchen_sink/libs/baseclass/download_file.py | p3g4asus/KivyMD | 441b3ef9daf412bd588bbb485612c5f2a53d669c | [
"MIT"
] | null | null | null | import os
from kivy.uix.screenmanager import Screen
from kivymd.toast import toast
class KitchenSinkDownloadFile(Screen):
    """Demo screen showing a download with a progress dialog."""
    def download_progress_hide(self, instance_progress, value):
        """Hide the progress dialog and show a re-download toolbar button."""
        self.ids.toolbar.right_action_items = [
            [
                "download",
                lambda x: self.download_progress_show(instance_progress),
            ]
        ]
    def download_progress_show(self, instance_progress):
        # Re-open the progress dialog with a fade-in animation.
        self.set_chevron_back_screen()
        instance_progress.open()
        instance_progress.animation_progress_from_fade()
    def show_example_download_file(self, interval):
        """Start downloading a sample file if the network is reachable."""
        from kivymd.uix.progressloader import MDProgressLoader
        def get_connect(host="8.8.8.8", port=53, timeout=3):
            # Connectivity probe: open a TCP connection to Google's public
            # DNS on port 53.  NOTE(review): the socket is never closed --
            # it is only released by garbage collection.
            import socket
            try:
                socket.setdefaulttimeout(timeout)
                socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
                    (host, port)
                )
                return True
            except (TimeoutError, ConnectionError, OSError):
                return False
        if get_connect():
            link = (
                "https://www.python.org/ftp/python/3.8.0/"
                "python-3.8.0-embed-win32.zip"
            )
            progress = MDProgressLoader(
                url_on_image=link,
                path_to_file=os.path.join(
                    os.environ["KITCHEN_SINK_ROOT"], "python-3.8.0.zip"
                ),
                download_complete=self.download_complete,
                download_hide=self.download_progress_hide,
            )
            progress.start(self.ids.box_flt)
        else:
            toast("Connect error!")
    def download_complete(self):
        # Called by MDProgressLoader when the file has been saved.
        self.set_chevron_back_screen()
        toast("Done")
    def set_chevron_back_screen(self):
        """Sets the return chevron to the previous screen in ToolBar."""
        self.ids.toolbar.right_action_items = [["dots-vertical", lambda x: x]]
15c06c70b78669fcd16c8fb81794076ec4ab28c1 | 450 | py | Python | examples/context_example.py | tomasvotava/retry-helper | cfbb587e38838d9e9d59c4eb80f296941db5c13f | [
"Apache-2.0"
] | 1 | 2020-11-23T09:48:33.000Z | 2020-11-23T09:48:33.000Z | examples/context_example.py | tomasvotava/retry-helper | cfbb587e38838d9e9d59c4eb80f296941db5c13f | [
"Apache-2.0"
] | null | null | null | examples/context_example.py | tomasvotava/retry-helper | cfbb587e38838d9e9d59c4eb80f296941db5c13f | [
"Apache-2.0"
] | 1 | 2021-02-01T14:25:57.000Z | 2021-02-01T14:25:57.000Z | import random
import logging
from retry_helper import RetryManager
logging.basicConfig(level=logging.DEBUG)
# Demo: retry rolling a die until a 6 comes up, at most 3 attempts.
with RetryManager(max_attempts=3, wait_seconds=0, exceptions=(ValueError,)) as retry:
    while retry:
        with retry.attempt:
            number = random.randint(1, 6)
            if number != 6:
                print("You roll", number)
                # Raising ValueError marks this attempt as failed so the
                # manager schedules another one (up to max_attempts).
                raise ValueError
            else:
                print("Hurray you roll 6!!!")
05d5fbbfc64238daff37410605b9d4951d165527 | 8,738 | py | Python | code/level.py | leggettc18/zelda-style-rpg | 4f4114f263801ab09eb6b71d9fd77a80ddce727a | [
"MIT"
] | null | null | null | code/level.py | leggettc18/zelda-style-rpg | 4f4114f263801ab09eb6b71d9fd77a80ddce727a | [
"MIT"
] | null | null | null | code/level.py | leggettc18/zelda-style-rpg | 4f4114f263801ab09eb6b71d9fd77a80ddce727a | [
"MIT"
] | null | null | null | """Module containing classes and functions that manage the state of the level map and camera."""
from random import choice, randint
import pygame
from enemy import Enemy
# pylint:disable=wildcard-import,unused-wildcard-import
from settings import *
from tile import Tile
from player import Player
from weapon import Weapon
from ui import UI
from particles import AnimationPlayer
# pylint:disable=unused-import
from debug import debug
from support import *
class Level:
    """Contains data and functions for managing level state, and sprites"""
    def __init__(self):
        # get the display surface
        self.display_surface = pygame.display.get_surface()
        # sprite group setup
        self.visible_sprites = YSortCameraGroup()
        self.obstacle_sprites = pygame.sprite.Group()
        # attack sprites
        self.current_attack = None
        self.attack_sprites = pygame.sprite.Group()
        self.attackable_sprites = pygame.sprite.Group()
        # sprite setup
        self.create_map()
        # user interface
        # pylint:disable=invalid-name
        self.ui = UI()
        # Particles
        self.animation_player = AnimationPlayer()
    def create_map(self):
        """Imports graphics, creates sprites and adds them to sprite groups,
        and creates the level map."""
        # Each CSV layer encodes one kind of tile; -1 means "empty cell".
        layouts = {
            'boundary': import_csv_layout('../map/map_FloorBlocks.csv'),
            'grass': import_csv_layout('../map/map_Grass.csv'),
            'object': import_csv_layout('../map/map_Objects.csv'),
            'entities': import_csv_layout('../map/map_Entities.csv')
        }
        graphics = {
            'grass': import_folder('../graphics/grass'),
            'objects': import_folder('../graphics/objects')
        }
        for style, layout in layouts.items():
            for row_index, row in enumerate(layout):
                for col_index, col in enumerate(row):
                    if col != '-1':
                        # pylint:disable=invalid-name
                        x = col_index * TILESIZE
                        y = row_index * TILESIZE
                        if style == 'boundary':
                            Tile((x, y), [self.obstacle_sprites], 'invisible')
                        if style == 'grass':
                            random_grass_image = choice(graphics['grass'])
                            Tile((x, y), [
                                self.visible_sprites, self.obstacle_sprites, self.attackable_sprites
                            ], 'grass', random_grass_image)
                        if style == 'object':
                            surface = graphics['objects'][int(col)]
                            Tile((x, y), [self.visible_sprites,
                                 self.obstacle_sprites], 'object', surface)
                        if style == 'entities':
                            # Entity id 394 marks the player spawn point.
                            if col == '394':
                                self.player = Player(
                                    (x, y),
                                    [self.visible_sprites],
                                    self.obstacle_sprites,
                                    self.create_attack,
                                    self.destroy_attack,
                                    self.create_magic
                                )
                            else:
                                # Remaining entity ids map to monster types.
                                if col == '390':
                                    monster_name = 'bamboo'
                                elif col == '391':
                                    monster_name = 'spirit'
                                elif col == '392':
                                    monster_name = 'raccoon'
                                else:
                                    monster_name = 'squid'
                                Enemy(
                                    monster_name,
                                    (x, y),
                                    [self.visible_sprites, self.attackable_sprites],
                                    self.obstacle_sprites,
                                    self.damage_player,
                                    self.trigger_death_particles
                                )
    def create_attack(self):
        """Creates the Weapon Sprite."""
        self.current_attack = Weapon(
            self.player, [self.visible_sprites, self.attack_sprites])
    def destroy_attack(self):
        """Destroys the current weapon sprite if there is any."""
        if self.current_attack:
            self.current_attack.kill()
            self.current_attack = None
    def create_magic(self, style, strength, cost):
        """Spawns magic spell onto the level"""
        # Placeholder: the spell system is not implemented yet; this only
        # logs the requested spell parameters.
        print(style)
        print(strength)
        print(cost)
    def player_attack_logic(self):
        """Checks if any player attacks have landed on an attackable object"""
        if self.attack_sprites:
            for attack_sprite in self.attack_sprites:
                collision_sprites = pygame.sprite.spritecollide(
                    attack_sprite, self.attackable_sprites, False)
                if collision_sprites:
                    for target_sprite in collision_sprites:
                        if target_sprite.sprite_type == 'grass':
                            # Grass is destroyed outright, with leaf particles
                            # spawned slightly above its center.
                            pos = target_sprite.rect.center
                            offset = pygame.math.Vector2(0, 75)
                            for _ in range(randint(3, 6)):
                                self.animation_player.create_grass_particles(
                                    pos - offset, [self.visible_sprites])
                            target_sprite.kill()
                        else:
                            target_sprite.get_damage(
                                self.player, attack_sprite.sprite_type)
    def damage_player(self, amount, attack_type):
        """Damages the player by the given amount and spawn desired particle effects."""
        # `vulnerable` implements invincibility frames after being hit.
        if self.player.vulnerable:
            self.player.health -= amount
            self.player.vulnerable = False
            self.player.hurt_time = pygame.time.get_ticks()
            # spawn particles
            self.animation_player.create_particles(
                attack_type, self.player.rect.center, [self.visible_sprites])
    def trigger_death_particles(self, pos, particle_type):
        """Tiggers particle effects that occur on death"""
        self.animation_player.create_particles(
            particle_type, pos, [self.visible_sprites])
    def run(self):
        """Draws and updates all the sprites of the game."""
        # update and draw the game
        self.visible_sprites.custom_draw(self.player)
        self.visible_sprites.update()
        self.visible_sprites.enemy_update(self.player)
        self.player_attack_logic()
        self.ui.display(self.player)
class YSortCameraGroup(pygame.sprite.Group):
    """Custom sprite group that sorts sprites by their y coordinate in order for sprite overlaps
    to happen with the correct perspective."""
    def __init__(self):
        # general setup
        self.display_surface = pygame.display.get_surface()
        self.half_width = self.display_surface.get_size()[0] // 2
        self.half_height = self.display_surface.get_size()[1] // 2
        self.offset = pygame.math.Vector2()
        # creating the floor
        self.floor_surf = pygame.image.load(
            '../graphics/tilemap/ground.png').convert()
        self.floor_rect = self.floor_surf.get_rect(topleft=(0, 0))
    def custom_draw(self, player):
        """Custom Draw function to handle drawing the sprites offset for the camera position."""
        # getting the offset: keep the player centered on screen.
        self.offset.x = player.rect.centerx - self.half_width
        self.offset.y = player.rect.centery - self.half_height
        # drawing the floor
        floor_offset_pos = self.floor_rect.topleft - self.offset
        self.display_surface.blit(self.floor_surf, floor_offset_pos)
        # Painter's algorithm: draw back-to-front ordered by centery.
        for sprite in sorted(self.sprites(), key=lambda sprite: sprite.rect.centery):
            offset_position = sprite.rect.topleft - self.offset
            self.display_surface.blit(sprite.image, offset_position)
    def enemy_update(self, player):
        """Updates all the enemy sprites"""
        enemy_sprites = [
            sprite for sprite in self.sprites()
            if hasattr(sprite, 'sprite_type') and sprite.sprite_type == 'enemy'
        ]
        for enemy in enemy_sprites:
            enemy.enemy_update(player)
| 43.044335 | 101 | 0.529183 |
2563a1fa7261568647dc1e0c4c3ed3c52110f1f9 | 683 | py | Python | careers/admin.py | Surveyor-Jr/zimaps | d4def072b50c7018e9f7800a36c2050f28791cc2 | [
"CC-BY-4.0"
] | null | null | null | careers/admin.py | Surveyor-Jr/zimaps | d4def072b50c7018e9f7800a36c2050f28791cc2 | [
"CC-BY-4.0"
] | null | null | null | careers/admin.py | Surveyor-Jr/zimaps | d4def072b50c7018e9f7800a36c2050f28791cc2 | [
"CC-BY-4.0"
] | null | null | null | from django.contrib import admin
from .models import FAQ, Services, Project, Category
@admin.register(Services)
class ServicesAdmin(admin.ModelAdmin):
    '''Admin View for Service'''
    # Columns shown in the change-list table.
    list_display = ('name', 'intro')
    # Sidebar filters.
    list_filter = ('author', 'category')
    # Fields matched by the admin search box.
    search_fields = ('name',)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    '''Admin View for Project'''
    # Columns shown in the change-list table.
    list_display = ('name',)
    # Sidebar filters.
    list_filter = ('author',)
    # Fields matched by the admin search box.
    search_fields = ('name', 'portfolio')
    # Default change-list ordering.
    ordering = ('name',)
# Category uses the stock ModelAdmin -- no customisation needed.
admin.site.register(Category)
@admin.register(FAQ)
class FAQAdmin(admin.ModelAdmin):
    '''Admin view for FAQ's'''
    # Show both the question and its answer in the change list.
    list_display = ('question', 'answer')
d46f9e611feddb3df6313fd7942e655d714305e7 | 2,519 | py | Python | flash/wifi.py | sleepychild/diy_sha_sep2019 | 2fdf9ec548e725e48013e6d84311da01a1b7521b | [
"MIT"
] | null | null | null | flash/wifi.py | sleepychild/diy_sha_sep2019 | 2fdf9ec548e725e48013e6d84311da01a1b7521b | [
"MIT"
] | null | null | null | flash/wifi.py | sleepychild/diy_sha_sep2019 | 2fdf9ec548e725e48013e6d84311da01a1b7521b | [
"MIT"
] | null | null | null | import network
class network_interface():
    """Thin wrapper around MicroPython's two WLAN interfaces.

    Handles switching between station (client) and access-point modes and
    provides helpers for connecting, scanning and serving an AP.  Intended
    to be used via the module-level ``net_if`` singleton below.
    """
    # NOTE: class attributes, so every instance shares the same two
    # interface objects -- fine for the singleton usage pattern here.
    station_interface = network.WLAN(network.STA_IF)
    ap_interface = network.WLAN(network.AP_IF)
    def interface_mode(self, mode=None):
        """Get or set the active interface mode.

        With no argument, return the currently active mode (network.STA_IF
        or network.AP_IF), or None if neither interface is active.  With a
        mode argument, activate that interface, deactivate the other, and
        return the mode; an unknown mode returns None.
        """
        if mode is None:
            if self.station_interface.active():
                self.ap_interface.active(False)
                return network.STA_IF
            if self.ap_interface.active():
                self.station_interface.active(False)
                return network.AP_IF
            return None
        if mode == network.STA_IF:
            self.ap_interface.active(False)
            self.station_interface.active(True)
            return network.STA_IF
        if mode == network.AP_IF:
            self.station_interface.active(False)
            self.ap_interface.active(True)
            return network.AP_IF
        return None
    def connect_to_ap(self, ssid, wifi_key):
        """Switch to station mode and connect to the given access point.

        Returns the current connection status; connect() is typically
        asynchronous, so False may simply mean "not connected yet".
        """
        self.interface_mode(network.STA_IF)
        self.station_interface.connect(ssid, wifi_key)
        return self.station_interface.isconnected()
    def start_access_point(self):
        """Enable AP mode and return the AP's (essid, ip_address).

        Bug fix: this previously read the module-level ``net_if`` singleton
        instead of ``self``, which broke any other instance of this class.
        """
        self.interface_mode(network.AP_IF)
        return self.ap_interface.config('essid'), self.ap_interface.ifconfig()[0]
    def get_authmode_name(self, authmode):
        """Map a network.AUTH_* constant to a readable name (or None)."""
        names = {
            network.AUTH_MAX: "MAX",
            network.AUTH_OPEN: "OPEN",
            network.AUTH_WEP: "WEP",
            network.AUTH_WPA2_PSK: "WPA2_PSK",
            network.AUTH_WPA_PSK: "WPA_PSK",
            network.AUTH_WPA_WPA2_PSK: "WPA_WPA2_PSK",
        }
        return names.get(authmode)
    def get_access_points(self):
        """Scan for nearby APs, restoring the previous mode afterwards.

        Returns a list of dicts describing each access point.  NOTE: if the
        station was connected before the scan it is disconnected and NOT
        automatically reconnected -- callers must reconnect themselves.
        """
        previous_mode = self.interface_mode()
        if previous_mode == network.STA_IF and self.station_interface.isconnected():
            self.station_interface.disconnect()
        else:
            self.interface_mode(network.STA_IF)
        ap_list = self.station_interface.scan()
        self.interface_mode(previous_mode)
        access_points = []
        for ap in ap_list:
            access_points.append({
                "ssid": ap[0].decode(),
                "bssid": ap[1],
                "channel": ap[2],
                "RSSI": ap[3],
                "authmode": ap[4],
                "authname": self.get_authmode_name(ap[4]),
                "hidden": ap[5],
            })
        return access_points
# Module-level singleton used by the rest of the firmware.
net_if = network_interface()
| 34.506849 | 85 | 0.593886 |
e1f352cf37484cdf7a0801d52b3a76de01518832 | 3,869 | py | Python | examples/wavenet/generate.py | nishnik/chainer | 03fc8a54b657f703855b25cbe53f56fda1295e76 | [
"MIT"
] | 1 | 2020-08-12T23:08:41.000Z | 2020-08-12T23:08:41.000Z | examples/wavenet/generate.py | nishnik/chainer | 03fc8a54b657f703855b25cbe53f56fda1295e76 | [
"MIT"
] | null | null | null | examples/wavenet/generate.py | nishnik/chainer | 03fc8a54b657f703855b25cbe53f56fda1295e76 | [
"MIT"
] | null | null | null | import argparse
import chainer
import chainerx
import librosa
import numpy
import tqdm
from net import UpsampleNet
from net import WaveNet
from utils import MuLaw
from utils import Preprocess
# Command-line interface and one-time setup for WaveNet generation.
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', help='input file')
parser.add_argument('--output', '-o', default='result.wav', help='output file')
parser.add_argument('--model', '-m', help='snapshot of trained model')
parser.add_argument('--n_loop', type=int, default=4,
                    help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
                    help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
                    help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
                    help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
                    help='Number of channels in the skip layers')
# NOTE(review): argparse's type=bool treats any non-empty string as True,
# so `--use_embed_tanh False` still yields True; consider a str2bool.
parser.add_argument('--use_embed_tanh', type=bool, default=True,
                    help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--device', '-d', type=str, default='-1',
                    help='Device specifier. Either ChainerX device '
                    'specifier or an integer. If non-negative integer, '
                    'CuPy arrays with specified device id are used. If '
                    'negative integer, NumPy arrays are used')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
                   type=int, nargs='?', const=0,
                   help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
if device.xp is chainer.backends.cuda.cupy:
    chainer.global_config.autotune = True
# Preprocess: extract the mel-spectrogram conditioning from the input wav.
_, condition, _ = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=None, quantize=args.a_channels)(args.input)
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)
# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)
# Load trained parameters
chainer.serializers.load_npz(
    args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
    args.model, decoder, 'updater/model:main/predictor/decoder/')
# Non-autoregressive generate: upsample the conditioning features once.
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
decoder.to_device(device)
x = chainer.Variable(x)
condition = chainer.Variable(condition)
conditions = encoder(condition)
decoder.initialize(1)
output = decoder.xp.zeros(conditions.shape[3])
# A workaround for ChainerX, which does not have random.choice.
# TODO(niboshi): Implement it in ChainerX
def random_choice(device, a, size, p):
    """Sample like ``xp.random.choice``, even on ChainerX devices."""
    if device.xp is not chainerx:
        return device.xp.random.choice(a, size=size, p=p)
    # ChainerX lacks random.choice: draw with NumPy on the host and send
    # the samples back to the target device.
    host_probs = chainerx.to_numpy(p)
    samples = numpy.random.choice(a, size=size, p=host_probs)
    return device.send(samples)
# Autoregressive generate: sample one quantized amplitude per step and feed
# it back in as a one-hot input for the next step.
for i in tqdm.tqdm(range(len(output))):
    with chainer.no_backprop_mode():
        out = decoder.generate(x, conditions[:, :, :, i:i + 1]).array
    value = random_choice(
        device,
        args.a_channels, size=1,
        p=chainer.functions.softmax(out).array[0, :, 0, 0])[0]
    zeros = decoder.xp.zeros_like(x.array)
    zeros[:, value, :, :] = 1
    x = chainer.Variable(zeros)
    output[i] = value
# Save: undo the mu-law companding and write a 16 kHz wav file.
output = chainer.get_device('@numpy').send(output)
wave = MuLaw(args.a_channels).itransform(output)
librosa.output.write_wav(args.output, wave, 16000)
| 36.847619 | 79 | 0.688292 |
d885023b0d41dc7066d14d3610125c2ad5a25c43 | 2,404 | py | Python | backend/mondo.py | STARInformatics/biomed-workbench | 510f533d9ec1296f69b0a5e979da5831d0aeb2dd | [
"MIT"
] | null | null | null | backend/mondo.py | STARInformatics/biomed-workbench | 510f533d9ec1296f69b0a5e979da5831d0aeb2dd | [
"MIT"
] | 17 | 2019-05-08T00:31:20.000Z | 2022-02-26T10:30:58.000Z | backend/mondo.py | STARInformatics/biomed-workbench | 510f533d9ec1296f69b0a5e979da5831d0aeb2dd | [
"MIT"
] | null | null | null | import json, os
from functools import lru_cache
from typing import List
MONDO = 'http://purl.obolibrary.org/obo/MONDO_'
def get(d:dict, *keys:str, default=None):
    """Safely walk nested mappings.

    Follows each key in *keys* into ``d``; if any step is missing or the
    current value is not subscriptable, returns *default* instead of
    raising.
    Bug fix: the original used a bare ``except:``, which also swallowed
    unrelated errors such as KeyboardInterrupt and SystemExit.
    """
    for key in keys:
        try:
            d = d[key]
        except (KeyError, IndexError, TypeError):
            return default
    return d
@lru_cache()
def load_mondo():
    """Load and index the MONDO disease ontology (cached after first call).

    Returns a dict mapping CURIEs like 'MONDO:0000001' to dicts with
    'iri', 'definition', 'synonoms' and lower-cased 'name'.
    """
    # NOTE(review): abspath(__name__) takes the absolute path of a module
    # *name*, not a file -- this resolves relative to the CWD; the intent
    # was probably __file__.  Verify before changing.
    path = os.path.dirname(os.path.abspath(__name__))
    with open(f'{path}/backend/data/mondo.json', 'r',encoding="utf8") as f:
        d = json.load(f)
    results = {}
    for graph in d['graphs']:
        for node in graph['nodes']:
            if MONDO in node['id']:
                # Skip unnamed and deprecated terms.
                if 'lbl' not in node:
                    continue
                if get(node, 'meta', 'deprecated') is True:
                    continue
                synonoms = [syn['val'].lower() for syn in get(node, 'meta', 'synonyms', default=[])]
                curie = node['id'].replace(MONDO, 'MONDO:')
                results[curie] = {
                    'iri' : node['id'],
                    'definition' : get(node, 'meta', 'definition', 'val'),
                    'synonoms' : synonoms,
                    'name' : node['lbl'].lower(),
                }
    return results
def search(keywords:List[str]) -> List[dict]:
    """Search MONDO term names for *keywords* (string or iterable of strings).

    A term matches if any keyword is a substring of its name.  Results are
    ranked by exact-phrase matches first, then by keyword frequency.
    """
    if isinstance(keywords, str):
        keywords = keywords.lower().split()
    elif isinstance(keywords, (list, tuple, set)):
        keywords = [k.lower() for k in keywords]
    else:
        raise Exception(f'Invalid type {type(keywords)} for keywords')
    nodes = load_mondo()
    results = []
    for curie, node in nodes.items():
        node['id'] = curie
        for keyword in keywords:
            if keyword in node['name']:
                results.append(node)
                break
    def keyword_order(node):
        # More negative == better: total keyword density over name+synonyms.
        # NOTE(review): divides by len(synonom) -- an empty synonym string
        # would raise ZeroDivisionError; confirm the data never has those.
        synonoms = node['synonoms'] + [node['name']]
        m = min(-sum(synonom.count(keyword) / len(synonom) for keyword in keywords) for synonom in synonoms)
        return m
    def exact_term_order(node):
        # More negative == better: occurrences of the exact joined phrase,
        # with an extra bonus when the name equals the phrase.
        synonoms = node['synonoms'] + [node['name']]
        k = ' '.join(keywords)
        s = -sum(synonom.count(k) for synonom in synonoms)
        if node['name'] == k:
            s -= 1
        return s
    # Two stable sorts: keyword_order becomes the secondary key and
    # exact_term_order the primary key.
    results = sorted(results, key=keyword_order)
    results = sorted(results, key=exact_term_order)
    return results
| 32.931507 | 108 | 0.522047 |
cccedaa5942ee4e46b25c11785c1b9ea160181bf | 23,827 | py | Python | mesonbuild/mintro.py | jekstrand/meson | 189d3b051393271a938e78e159da1ee6476a34c9 | [
"Apache-2.0"
] | 44 | 2022-03-16T08:32:31.000Z | 2022-03-31T16:02:35.000Z | mesonbuild/mintro.py | jekstrand/meson | 189d3b051393271a938e78e159da1ee6476a34c9 | [
"Apache-2.0"
] | 2 | 2015-03-23T15:30:17.000Z | 2015-03-23T20:19:19.000Z | mesonbuild/mintro.py | jekstrand/meson | 189d3b051393271a938e78e159da1ee6476a34c9 | [
"Apache-2.0"
] | 18 | 2022-03-19T04:41:04.000Z | 2022-03-31T03:32:12.000Z | # Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a helper script for IDE developers. It allows you to
extract information such as list of targets, files, compiler flags,
tests and so on. All output is in JSON for simple parsing.
Currently only works for the Ninja backend. Others use generated
project files and don't need this info."""
import collections
import json
from . import build, coredata as cdata
from . import mesonlib
from .ast import IntrospectionInterpreter, build_target_functions, AstConditionLevel, AstIDGenerator, AstIndentationGenerator, AstJSONPrinter
from . import mlog
from .backend import backends
from .mparser import BaseNode, FunctionNode, ArrayNode, ArgumentNode, StringNode
from .interpreter import Interpreter
from pathlib import PurePath
import typing as T
import os
def get_meson_info_file(info_dir: str) -> str:
    """Return the path of the top-level meson-info.json inside *info_dir*."""
    info_file_name = 'meson-info.json'
    return os.path.join(info_dir, info_file_name)
def get_meson_introspection_version() -> str:
    """Version of the introspection file format written by this module."""
    current_format_version = '1.0.0'
    return current_format_version
def get_meson_introspection_required_version() -> T.List[str]:
    """Version constraints an on-disk introspection file must satisfy."""
    lower_bound = '>=1.0'
    upper_bound = '<2.0'
    return [lower_bound, upper_bound]
class IntroCommand:
    """Description of a single introspection sub-command.

    ``func`` produces the command's data from a configured build
    directory; ``no_bd`` produces it straight from an
    IntrospectionInterpreter when no build directory exists.  Either
    callback may be absent.
    """

    def __init__(self,
                 desc: str,
                 func: T.Optional[T.Callable[[], T.Union[dict, list]]] = None,
                 no_bd: T.Optional[T.Callable[[IntrospectionInterpreter], T.Union[dict, list]]] = None) -> None:
        # The description doubles as the argparse help string; terminate it.
        self.desc = desc + '.'
        self.func = func
        self.no_bd = no_bd
def get_meson_introspection_types(coredata: T.Optional[cdata.CoreData] = None,
                                  builddata: T.Optional[build.Build] = None,
                                  backend: T.Optional[backends.Backend] = None,
                                  sourcedir: T.Optional[str] = None) -> 'T.Mapping[str, IntroCommand]':
    """Build the table of all introspection commands, keyed by name.

    When both a backend and build data are supplied, the serialised
    test/benchmark/install data is prepared eagerly here so the per-command
    lambdas can close over it; otherwise those values are left as None and
    only the ``no_bd`` (no-build-directory) callbacks are meaningful.
    ``sourcedir`` is accepted for API symmetry but is not used in this body.
    """
    if backend and builddata:
        benchmarkdata = backend.create_test_serialisation(builddata.get_benchmarks())
        testdata = backend.create_test_serialisation(builddata.get_tests())
        installdata = backend.create_install_data()
        interpreter = backend.interpreter
    else:
        benchmarkdata = testdata = installdata = None
    # Enforce key order for argparse (flags are registered in this order).
    return collections.OrderedDict([
        ('ast', IntroCommand('Dump the AST of the meson file', no_bd=dump_ast)),
        ('benchmarks', IntroCommand('List all benchmarks', func=lambda: list_benchmarks(benchmarkdata))),
        ('buildoptions', IntroCommand('List all build options', func=lambda: list_buildoptions(coredata), no_bd=list_buildoptions_from_source)),
        ('buildsystem_files', IntroCommand('List files that make up the build system', func=lambda: list_buildsystem_files(builddata, interpreter))),
        ('dependencies', IntroCommand('List external dependencies', func=lambda: list_deps(coredata), no_bd=list_deps_from_source)),
        ('scan_dependencies', IntroCommand('Scan for dependencies used in the meson.build file', no_bd=list_deps_from_source)),
        ('installed', IntroCommand('List all installed files and directories', func=lambda: list_installed(installdata))),
        ('projectinfo', IntroCommand('Information about projects', func=lambda: list_projinfo(builddata), no_bd=list_projinfo_from_source)),
        ('targets', IntroCommand('List top level targets', func=lambda: list_targets(builddata, installdata, backend), no_bd=list_targets_from_source)),
        ('tests', IntroCommand('List all unit tests', func=lambda: list_tests(testdata))),
    ])
def add_arguments(parser):
    """Register one boolean flag per introspection command plus generic options."""
    for name, cmd in get_meson_introspection_types().items():
        # Command names use underscores internally; flags use dashes.
        parser.add_argument('--' + name.replace('_', '-'), action='store_true',
                            dest=name, default=False, help=cmd.desc)
    parser.add_argument('--backend', choices=sorted(cdata.backendlist), dest='backend', default='ninja',
                        help='The backend to use for the --buildoptions introspection.')
    parser.add_argument('-a', '--all', action='store_true', dest='all', default=False,
                        help='Print all available information.')
    parser.add_argument('-i', '--indent', action='store_true', dest='indent', default=False,
                        help='Enable pretty printed JSON.')
    parser.add_argument('-f', '--force-object-output', action='store_true', dest='force_dict', default=False,
                        help='Always use the new JSON format for multiple entries (even for 0 and 1 introspection commands)')
    parser.add_argument('builddir', nargs='?', default='.', help='The build directory')
def dump_ast(intr: IntrospectionInterpreter) -> T.Dict[str, T.Any]:
    """Serialise the parsed meson.build AST of *intr* into a JSON-friendly dict."""
    json_visitor = AstJSONPrinter()
    intr.ast.accept(json_visitor)
    return json_visitor.result
def list_installed(installdata):
    """Map each build-tree path to its final installed location.

    Covers targets (plus their aliases), data files, headers, man pages and
    installed sub-directories.  Returns an empty dict when *installdata* is
    None.
    """
    mapping = {}
    if installdata is None:
        return mapping
    prefix = installdata.prefix
    for target in installdata.targets:
        built = os.path.join(installdata.build_dir, target.fname)
        mapping[built] = os.path.join(prefix, target.outdir, os.path.basename(target.fname))
        # Aliases (e.g. versioned shared-library names) install alongside the target.
        for alias in target.aliases:
            built_alias = os.path.join(installdata.build_dir, alias)
            mapping[built_alias] = os.path.join(prefix, target.outdir, os.path.basename(alias))
    for src, installpath, _ in installdata.data:
        mapping[src] = os.path.join(prefix, installpath)
    # Headers keep their own file name below the chosen include directory.
    for src, installdir, _ in installdata.headers:
        mapping[src] = os.path.join(prefix, installdir, os.path.basename(src))
    for src, installpath, _ in installdata.man:
        mapping[src] = os.path.join(prefix, installpath)
    for src, installpath, _, _ in installdata.install_subdirs:
        mapping[src] = os.path.join(prefix, installpath)
    return mapping
def list_targets_from_source(intr: IntrospectionInterpreter) -> T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]:
    """Approximate the targets list by walking the parsed meson.build AST.

    Used when no configured build directory exists.  Compiler/language
    information is unavailable in this mode, so each target reports a
    single source group with language 'unknown' and empty compiler data.
    """
    tlist = []  # type: T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]
    for i in intr.targets:
        sources = []  # type: T.List[str]
        for n in i['sources']:
            # Extract the string arguments of whichever AST node carried the sources.
            args = []  # type: T.List[BaseNode]
            if isinstance(n, FunctionNode):
                args = list(n.args.arguments)
                # Target declarations pass the target name as the first
                # positional argument; it is not a source file.
                if n.func_name in build_target_functions:
                    args.pop(0)
            elif isinstance(n, ArrayNode):
                args = n.args.arguments
            elif isinstance(n, ArgumentNode):
                args = n.arguments
            for j in args:
                if isinstance(j, StringNode):
                    assert isinstance(j.value, str)
                    sources += [j.value]
                elif isinstance(j, str):
                    sources += [j]
        tlist += [{
            'name': i['name'],
            'id': i['id'],
            'type': i['type'],
            'defined_in': i['defined_in'],
            'filename': [os.path.join(i['subdir'], x) for x in i['outputs']],
            'build_by_default': i['build_by_default'],
            'target_sources': [{
                'language': 'unknown',
                'compiler': [],
                'parameters': [],
                # Sources are reported as absolute, normalised paths.
                'sources': [os.path.normpath(os.path.join(os.path.abspath(intr.source_root), i['subdir'], x)) for x in sources],
                'generated_sources': []
            }],
            'subproject': None,  # Subprojects are not supported
            'installed': i['installed']
        }]
    return tlist
def list_targets(builddata: build.Build, installdata, backend: backends.Backend) -> T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]:
    """Describe every configured top-level target, including install locations."""
    tlist = []  # type: T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]
    build_dir = builddata.environment.get_build_dir()
    src_dir = builddata.environment.get_source_dir()
    # Fast lookup table for installation files: output basename -> installed paths
    # (the target's own path plus any aliases).
    install_lookuptable = {}
    for i in installdata.targets:
        out = [os.path.join(installdata.prefix, i.outdir, os.path.basename(i.fname))]
        out += [os.path.join(installdata.prefix, i.outdir, os.path.basename(x)) for x in i.aliases]
        install_lookuptable[os.path.basename(i.fname)] = [str(PurePath(x)) for x in out]
    for (idname, target) in builddata.get_targets().items():
        if not isinstance(target, build.Target):
            raise RuntimeError('The target object in `builddata.get_targets()` is not of type `build.Target`. Please file a bug with this error message.')
        t = {
            'name': target.get_basename(),
            'id': idname,
            'type': target.get_typename(),
            'defined_in': os.path.normpath(os.path.join(src_dir, target.subdir, 'meson.build')),
            'filename': [os.path.join(build_dir, target.subdir, x) for x in target.get_outputs()],
            'build_by_default': target.build_by_default,
            'target_sources': backend.get_introspection_data(idname, target),
            'subproject': target.subproject or None
        }
        if installdata and target.should_install():
            t['installed'] = True
            # Outputs without an entry in the lookup table map to [None].
            t['install_filename'] = [install_lookuptable.get(x, [None]) for x in target.get_outputs()]
            t['install_filename'] = [x for sublist in t['install_filename'] for x in sublist]  # flatten the list
        else:
            t['installed'] = False
        tlist.append(t)
    return tlist
def list_buildoptions_from_source(intr: IntrospectionInterpreter) -> T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]]:
    """List build options without a build dir, including subproject variants."""
    subprojects = [i['name'] for i in intr.project_data['subprojects']]
    return list_buildoptions(intr.coredata, subprojects)
def list_buildoptions(coredata: cdata.CoreData, subprojects: T.Optional[T.List[str]] = None) -> T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]]:
    """Flatten all user-settable options into a list of plain dicts.

    Options are grouped into sections (core/backend/base/compiler/directory/
    user/test) and tagged with the machine ('any', 'host' or 'build') they
    apply to.  When *subprojects* is given, non-yielding core options are
    duplicated once per subproject under a 'subproject:option' key.
    """
    optlist = []  # type: T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]]
    dir_option_names = ['bindir',
                        'datadir',
                        'includedir',
                        'infodir',
                        'libdir',
                        'libexecdir',
                        'localedir',
                        'localstatedir',
                        'mandir',
                        'prefix',
                        'sbindir',
                        'sharedstatedir',
                        'sysconfdir']
    test_option_names = ['errorlogs',
                         'stdsplit']
    # Everything built-in that is neither a directory nor a test option.
    core_option_names = [k for k in coredata.builtins if k not in dir_option_names + test_option_names]
    dir_options = {k: o for k, o in coredata.builtins.items() if k in dir_option_names}
    test_options = {k: o for k, o in coredata.builtins.items() if k in test_option_names}
    core_options = {k: o for k, o in coredata.builtins.items() if k in core_option_names}
    if subprojects:
        # Add per subproject built-in options
        sub_core_options = {}
        for sub in subprojects:
            for k, o in core_options.items():
                # Yielding options defer to the main project's value.
                if o.yielding:
                    continue
                sub_core_options[sub + ':' + k] = o
        core_options.update(sub_core_options)

    def add_keys(options: T.Dict[str, cdata.UserOption], section: str, machine: str = 'any') -> None:
        # Append one dict per option (sorted by name) to the enclosing optlist.
        for key in sorted(options.keys()):
            opt = options[key]
            optdict = {'name': key, 'value': opt.value, 'section': section, 'machine': machine}
            if isinstance(opt, cdata.UserStringOption):
                typestr = 'string'
            elif isinstance(opt, cdata.UserBooleanOption):
                typestr = 'boolean'
            elif isinstance(opt, cdata.UserComboOption):
                optdict['choices'] = opt.choices
                typestr = 'combo'
            elif isinstance(opt, cdata.UserIntegerOption):
                typestr = 'integer'
            elif isinstance(opt, cdata.UserArrayOption):
                typestr = 'array'
            else:
                raise RuntimeError("Unknown option type")
            optdict['type'] = typestr
            optdict['description'] = opt.description
            optlist.append(optdict)
    add_keys(core_options, 'core')
    add_keys(coredata.builtins_per_machine.host, 'core', machine='host')
    add_keys(
        {'build.' + k: o for k, o in coredata.builtins_per_machine.build.items()},
        'core',
        machine='build',
    )
    add_keys(coredata.backend_options, 'backend')
    add_keys(coredata.base_options, 'base')
    add_keys(
        dict(coredata.flatten_lang_iterator(coredata.compiler_options.host.items())),
        'compiler',
        machine='host',
    )
    add_keys(
        {
            'build.' + k: o for k, o in
            coredata.flatten_lang_iterator(coredata.compiler_options.build.items())
        },
        'compiler',
        machine='build',
    )
    add_keys(dir_options, 'directory')
    add_keys(coredata.user_options, 'user')
    add_keys(test_options, 'test')
    return optlist
def find_buildsystem_files_list(src_dir) -> T.List[str]:
    """Recursively collect meson build-definition files under *src_dir*.

    Returns paths relative to *src_dir* for every ``meson.build`` and
    ``meson_options.txt`` found, in os.walk order.
    """
    # I feel dirty about this. But only slightly.
    build_file_names = ('meson.build', 'meson_options.txt')
    filelist = []  # type: T.List[str]
    for root, _, files in os.walk(src_dir):
        filelist.extend(
            os.path.relpath(os.path.join(root, f), src_dir)
            for f in files if f in build_file_names)
    return filelist
def list_buildsystem_files(builddata: build.Build, interpreter: Interpreter) -> T.List[str]:
    """Return the build definition files, as POSIX paths under the source dir."""
    source_root = builddata.environment.get_source_dir()
    return [PurePath(source_root, f).as_posix()
            for f in interpreter.get_build_def_files()]
def list_deps_from_source(intr: IntrospectionInterpreter) -> T.List[T.Dict[str, T.Union[str, bool]]]:
    """Report dependencies found by scanning meson.build, without configuring."""
    wanted_keys = (
        'name',
        'required',
        'version',
        'has_fallback',
        'conditional',
    )
    return [{k: v for k, v in dep.items() if k in wanted_keys}
            for dep in intr.dependencies]
def list_deps(coredata: cdata.CoreData) -> T.List[T.Dict[str, T.Union[str, T.List[str]]]]:
    """Describe every *found* external dependency of the host machine."""
    found_deps = []  # type: T.List[T.Dict[str, T.Union[str, T.List[str]]]]
    for dep in coredata.deps.host.values():
        if not dep.found():
            continue
        found_deps.append({'name': dep.name,
                           'version': dep.get_version(),
                           'compile_args': dep.get_compile_args(),
                           'link_args': dep.get_link_args()})
    return found_deps
def get_test_list(testdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]:
    """Convert serialised test (or benchmark) objects into plain dicts."""
    result = []  # type: T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]
    for t in testdata:
        # A single executable may be stored as a bare string; normalise to a list.
        fname = [t.fname] if isinstance(t.fname, str) else t.fname
        # EnvironmentVariables objects must be resolved into a plain mapping.
        if isinstance(t.env, build.EnvironmentVariables):
            env = t.env.get_env({})
        else:
            env = t.env
        result.append({
            'cmd': fname + t.cmd_args,
            'env': env,
            'name': t.name,
            'workdir': t.workdir,
            'timeout': t.timeout,
            'suite': t.suite,
            'is_parallel': t.is_parallel,
            'priority': t.priority,
            'protocol': str(t.protocol),
        })
    return result
def list_tests(testdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]:
    """Return all unit tests as plain dicts (thin wrapper over get_test_list)."""
    return get_test_list(testdata)
def list_benchmarks(benchdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]:
    """Return all benchmarks as plain dicts (thin wrapper over get_test_list)."""
    return get_test_list(benchdata)
def list_projinfo(builddata: build.Build) -> T.Dict[str, T.Union[str, T.List[T.Dict[str, str]]]]:
    """Summarise the main project and all configured subprojects."""
    subprojects = [
        {'name': name,
         'version': version,
         'descriptive_name': builddata.projects.get(name)}
        for name, version in builddata.subprojects.items()
    ]
    return {'version': builddata.project_version,
            'descriptive_name': builddata.project_name,
            'subproject_dir': builddata.subproject_dir,
            'subprojects': subprojects}
def list_projinfo_from_source(intr: IntrospectionInterpreter) -> T.Dict[str, T.Union[str, T.List[T.Dict[str, str]]]]:
    """Augment intr.project_data with build-system file lists and return it.

    Files below each subproject's directory are attributed to that
    subproject; whatever remains is attributed to the main project.
    Mutates intr.project_data in place.
    """
    sourcedir = intr.source_root
    files = find_buildsystem_files_list(sourcedir)
    files = [os.path.normpath(x) for x in files]
    for i in intr.project_data['subprojects']:
        basedir = os.path.join(intr.subproject_dir, i['name'])
        i['buildsystem_files'] = [x for x in files if x.startswith(basedir)]
        # Shrink the pool so each file is claimed by at most one subproject.
        files = [x for x in files if not x.startswith(basedir)]
    intr.project_data['buildsystem_files'] = files
    intr.project_data['subproject_dir'] = intr.subproject_dir
    return intr.project_data
def print_results(options, results: T.Sequence[T.Tuple[str, T.Union[dict, T.List[T.Any]]]], indent: int) -> int:
    """Print introspection *results* as JSON and return an exit code.

    A single result is printed bare (legacy format) unless
    options.force_dict is set, in which case all results are wrapped in
    one object keyed by command name.  Returns 1 when nothing was
    requested, 0 otherwise.
    """
    force_dict = options.force_dict
    if not results and not force_dict:
        print('No command specified')
        return 1
    if len(results) == 1 and not force_dict:
        # Keep the historic single-command output format.
        print(json.dumps(results[0][1], indent=indent))
    else:
        combined = {name: data for name, data in results}
        print(json.dumps(combined, indent=indent))
    return 0
def run(options) -> int:
    """Entry point for `meson introspect`; returns a process exit code.

    Two modes: if *builddir* points at a meson.build file, the source is
    analysed directly (no build directory needed); otherwise the
    pre-generated intro-*.json files inside the build directory are read.
    """
    datadir = 'meson-private'
    infodir = 'meson-info'
    if options.builddir is not None:
        datadir = os.path.join(options.builddir, datadir)
        infodir = os.path.join(options.builddir, infodir)
    indent = 4 if options.indent else None
    results = []  # type: T.List[T.Tuple[str, T.Union[dict, T.List[T.Any]]]]
    # Strip the trailing 'meson.build' file name (11 characters) to get the dir.
    sourcedir = '.' if options.builddir == 'meson.build' else options.builddir[:-11]
    intro_types = get_meson_introspection_types(sourcedir=sourcedir)
    if 'meson.build' in [os.path.basename(options.builddir), options.builddir]:
        # Make sure that log entries in other parts of meson don't interfere with the JSON output
        mlog.disable()
        backend = backends.get_backend_from_name(options.backend)
        intr = IntrospectionInterpreter(sourcedir, '', backend.name, visitors = [AstIDGenerator(), AstIndentationGenerator(), AstConditionLevel()])
        intr.analyze()
        # Re-enable logging just in case
        mlog.enable()
        # Only commands with a no-build-directory implementation are usable here.
        for key, val in intro_types.items():
            if (not options.all and not getattr(options, key, False)) or not val.no_bd:
                continue
            results += [(key, val.no_bd(intr))]
        return print_results(options, results, indent)
    infofile = get_meson_info_file(infodir)
    if not os.path.isdir(datadir) or not os.path.isdir(infodir) or not os.path.isfile(infofile):
        print('Current directory is not a meson build directory.\n'
              'Please specify a valid build dir or change the working directory to it.\n'
              'It is also possible that the build directory was generated with an old\n'
              'meson version. Please regenerate it in this case.')
        return 1
    with open(infofile, 'r') as fp:
        raw = json.load(fp)
        intro_vers = raw.get('introspection', {}).get('version', {}).get('full', '0.0.0')
    # Refuse to read files written by an incompatible meson version.
    vers_to_check = get_meson_introspection_required_version()
    for i in vers_to_check:
        if not mesonlib.version_compare(intro_vers, i):
            print('Introspection version {} is not supported. '
                  'The required version is: {}'
                  .format(intro_vers, ' and '.join(vers_to_check)))
            return 1
    # Extract introspection information from JSON
    for i in intro_types.keys():
        if not intro_types[i].func:
            continue
        if not options.all and not getattr(options, i, False):
            continue
        curr = os.path.join(infodir, 'intro-{}.json'.format(i))
        if not os.path.isfile(curr):
            print('Introspection file {} does not exist.'.format(curr))
            return 1
        with open(curr, 'r') as fp:
            results += [(i, json.load(fp))]
    return print_results(options, results, indent)
updated_introspection_files = [] # type: T.List[str]
def write_intro_info(intro_info: T.Sequence[T.Tuple[str, T.Union[dict, T.List[T.Any]]]], info_dir: str) -> None:
    """Atomically write each (name, data) pair to intro-<name>.json in *info_dir*.

    Every section written is appended to the module-level
    updated_introspection_files list.
    """
    global updated_introspection_files
    tmp_file = os.path.join(info_dir, 'tmp_dump.json')
    for name, data in intro_info:
        out_file = os.path.join(info_dir, 'intro-{}.json'.format(name))
        # Write to a temp file first so readers never observe a partial file.
        with open(tmp_file, 'w') as fp:
            json.dump(data, fp)
            fp.flush()  # Not sure if this is needed
        os.replace(tmp_file, out_file)
        updated_introspection_files += [name]
def generate_introspection_file(builddata: build.Build, backend: backends.Backend) -> None:
    """Collect every introspection section that needs build data and write it out."""
    coredata = builddata.environment.get_coredata()
    intro_types = get_meson_introspection_types(coredata=coredata, builddata=builddata, backend=backend)
    # Sections without a build-directory callback cannot be generated here.
    intro_info = [(name, cmd.func())
                  for name, cmd in intro_types.items()
                  if cmd.func]
    write_intro_info(intro_info, builddata.environment.info_dir)
def update_build_options(coredata: cdata.CoreData, info_dir) -> None:
    """Rewrite only the buildoptions introspection file (e.g. after configure)."""
    intro_info = [
        ('buildoptions', list_buildoptions(coredata))
    ]
    write_intro_info(intro_info, info_dir)
def split_version_string(version: str) -> T.Dict[str, T.Union[str, int]]:
    """Break a dotted version string into numeric major/minor/patch parts.

    Missing components default to 0; the unparsed input is kept under 'full'.
    """
    parts = version.split('.')
    # Pad so major/minor/patch can be read unconditionally.
    while len(parts) < 3:
        parts.append('0')
    return {
        'full': version,
        'major': int(parts[0]),
        'minor': int(parts[1]),
        'patch': int(parts[2]),
    }
def write_meson_info_file(builddata: build.Build, errors: list, build_files_updated: bool = False) -> None:
    """Atomically (re)write the top-level meson-info.json index file.

    Records the meson version, source/build/info directories, which
    intro-*.json sections exist and whether they were updated in this
    process, plus any configure errors.
    """
    global updated_introspection_files  # only read here, never rebound
    info_dir = builddata.environment.info_dir
    info_file = get_meson_info_file(info_dir)
    intro_types = get_meson_introspection_types()
    intro_info = {}
    for i in intro_types.keys():
        if not intro_types[i].func:
            continue
        intro_info[i] = {
            'file': 'intro-{}.json'.format(i),
            'updated': i in updated_introspection_files
        }
    info_data = {
        'meson_version': split_version_string(cdata.version),
        'directories': {
            'source': builddata.environment.get_source_dir(),
            'build': builddata.environment.get_build_dir(),
            'info': info_dir,
        },
        'introspection': {
            'version': split_version_string(get_meson_introspection_version()),
            'information': intro_info,
        },
        'build_files_updated': build_files_updated,
    }
    if errors:
        info_data['error'] = True
        info_data['error_list'] = [x if isinstance(x, str) else str(x) for x in errors]
    else:
        info_data['error'] = False
    # Write the data to disc via a temp file so readers never see a partial file.
    tmp_file = os.path.join(info_dir, 'tmp_dump.json')
    with open(tmp_file, 'w') as fp:
        json.dump(info_data, fp)
        fp.flush()
    os.replace(tmp_file, info_file)
| 44.787594 | 192 | 0.623914 |
2517cca648f8b325a9b6a0c26e2b317c99bf7047 | 37,677 | py | Python | ratsql/models/nl2code/decoder.py | drawar/rat-sql | 95ffe45d1a55d54e22e1d51851177e783a12fac5 | [
"MIT"
] | null | null | null | ratsql/models/nl2code/decoder.py | drawar/rat-sql | 95ffe45d1a55d54e22e1d51851177e783a12fac5 | [
"MIT"
] | null | null | null | ratsql/models/nl2code/decoder.py | drawar/rat-sql | 95ffe45d1a55d54e22e1d51851177e783a12fac5 | [
"MIT"
] | 1 | 2021-07-19T04:40:42.000Z | 2021-07-19T04:40:42.000Z | import collections.abc
import copy
import itertools
import json
import os
import attr
import entmax
import torch
import torch.nn.functional as F
from ratsql.models import abstract_preproc
from ratsql.models import attention
from ratsql.models import variational_lstm
from ratsql.models.nl2code.infer_tree_traversal import InferenceTreeTraversal
from ratsql.models.nl2code.train_tree_traversal import TrainTreeTraversal
from ratsql.models.nl2code.tree_traversal import TreeTraversal
from ratsql.utils import registry
from ratsql.utils import serialization
from ratsql.utils import vocab
def lstm_init(device, num_layers, hidden_size, *batch_sizes):
    """Zero-filled (h, c) initial state for an LSTM.

    The state shape is (*batch_sizes, hidden_size), prefixed with
    num_layers when that is not None.  Both returned entries alias the
    same zero tensor.
    """
    shape = list(batch_sizes) + [hidden_size]
    if num_layers is not None:
        shape.insert(0, num_layers)
    zeros = torch.zeros(*shape, device=device)
    return (zeros, zeros)
def maybe_stack(items, dim=None):
    """Stack the non-None tensors in *items* along *dim*.

    Returns None when every item is None; a single surviving tensor is
    unsqueezed instead of stacked so the result always gains one dim.
    """
    present = [t for t in items if t is not None]
    if len(present) == 0:
        return None
    if len(present) == 1:
        return present[0].unsqueeze(dim)
    return torch.stack(present, dim)
def accumulate_logprobs(d, keys_and_logprobs):
    """Accumulate log-probabilities per key in log space.

    For each (key, logprob) pair, sets d[key] to logprob on first sight
    and to logaddexp(d[key], logprob) afterwards.  Mutates *d* in place.
    """
    for key, logprob in keys_and_logprobs:
        previous = d.get(key)
        if previous is None:
            d[key] = logprob
        else:
            stacked = torch.stack((logprob, previous), dim=0)
            d[key] = torch.logsumexp(stacked, dim=0)
def get_field_presence_info(ast_wrapper, node, field_infos):
    """Summarise which fields of *node* are populated, as a hashable tuple.

    Per field: for built-in (primitive) types the value's type name is
    recorded (or False when an optional field is absent); for composite
    types a bare presence flag is recorded, and a mandatory composite
    field is asserted to be present.
    """
    summary = []
    for info in field_infos:
        value = node.get(info.name)
        is_present = value is not None and value != []
        optional = info.opt or info.seq
        builtin = info.type in ast_wrapper.primitive_types
        if builtin:
            if optional:
                # TODO: make it possible to deal with "singleton?"
                summary.append(is_present and type(value).__name__)
            else:
                summary.append(type(value).__name__)
        else:
            if optional:
                summary.append(is_present)
            else:
                assert is_present
                summary.append(True)
    return tuple(summary)
@attr.s
class NL2CodeDecoderPreprocItem:
    """One preprocessed decoder example: parsed AST plus the raw code string."""
    # Parsed AST of the example's code, as a nested dict tree.
    tree = attr.ib()
    # The original, unparsed code the tree was derived from.
    orig_code = attr.ib()
class NL2CodeDecoderPreproc(abstract_preproc.AbstractPreproc):
    """Preprocessor for the NL2Code decoder.

    Parses each example's code into a grammar AST, accumulates the
    terminal vocabulary and the set of observed grammar productions over
    the training section, and serialises everything to *save_path*.
    """

    def __init__(
            self,
            grammar,
            save_path,
            min_freq=3,
            max_count=5000,
            use_seq_elem_rules=False,
    ):
        """Set up output paths and empty accumulators.

        min_freq/max_count bound the terminal vocabulary;
        use_seq_elem_rules switches sum-type rules to *_seq_elem variants
        for constructors seen inside sequence fields.
        """
        self.grammar = registry.construct('grammar', grammar)
        self.ast_wrapper = self.grammar.ast_wrapper
        self.vocab_path = os.path.join(save_path, 'dec_vocab.json')
        self.observed_productions_path = os.path.join(
            save_path, 'observed_productions.json',
        )
        self.grammar_rules_path = os.path.join(save_path, 'grammar_rules.json')
        self.data_dir = os.path.join(save_path, 'dec')
        self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
        self.use_seq_elem_rules = use_seq_elem_rules
        # section name -> list of NL2CodeDecoderPreprocItem
        self.items = collections.defaultdict(list)
        # Observed productions, filled by _record_productions over training data.
        self.sum_type_constructors = collections.defaultdict(set)
        self.field_presence_infos = collections.defaultdict(set)
        self.seq_lengths = collections.defaultdict(set)
        self.primitive_types = set()
        # Populated by save()/load().
        self.vocab = None
        self.all_rules = None
        self.rules_mask = None

    def validate_item(self, item, section):
        """Parse and verify item.code; return (keep_item, parsed_ast_or_None).

        Unparseable or unverifiable items are kept (with no AST) for every
        section except 'train'.
        """
        parsed = self.grammar.parse(item.code, section)
        if parsed:
            try:
                self.ast_wrapper.verify_ast(parsed)
            except AssertionError:
                return section != 'train', None
            return True, parsed
        return section != 'train', None

    def add_item(self, item, section, validation_info):
        """Record one validated item; training items also feed vocab/productions."""
        root = validation_info
        if section == 'train':
            for token in self._all_tokens(root):
                self.vocab_builder.add_word(token)
            self._record_productions(root)
        self.items[section].append(
            NL2CodeDecoderPreprocItem(
                tree=root,
                orig_code=item.code,
            ),
        )

    def clear_items(self):
        """Drop all accumulated items (but keep vocab/production statistics)."""
        self.items = collections.defaultdict(list)

    def save(self):
        """Write vocab, per-section items, observed productions and rules to disk.

        Note: converts the production defaultdict-of-sets into sorted
        dict-of-lists in place, so the instance is JSON-ready afterwards.
        """
        os.makedirs(self.data_dir, exist_ok=True)
        self.vocab = self.vocab_builder.finish()
        self.vocab.save(self.vocab_path)
        for section, items in self.items.items():
            with open(os.path.join(self.data_dir, section + '.jsonl'), 'w') as f:
                for item in items:
                    f.write(json.dumps(attr.asdict(item)) + '\n')
        # observed_productions
        self.sum_type_constructors = serialization.to_dict_with_sorted_values(
            self.sum_type_constructors,
        )
        self.field_presence_infos = serialization.to_dict_with_sorted_values(
            self.field_presence_infos, key=str,
        )
        self.seq_lengths = serialization.to_dict_with_sorted_values(
            self.seq_lengths,
        )
        self.primitive_types = sorted(self.primitive_types)
        with open(self.observed_productions_path, 'w') as f:
            json.dump(
                {
                    'sum_type_constructors': self.sum_type_constructors,
                    'field_presence_infos': self.field_presence_infos,
                    'seq_lengths': self.seq_lengths,
                    'primitive_types': self.primitive_types,
                }, f, indent=2, sort_keys=True,
            )
        # grammar
        self.all_rules, self.rules_mask = self._calculate_rules()
        with open(self.grammar_rules_path, 'w') as f:
            json.dump(
                {
                    'all_rules': self.all_rules,
                    'rules_mask': self.rules_mask,
                }, f, indent=2, sort_keys=True,
            )

    def load(self):
        """Restore vocab, observed productions and grammar rules from disk."""
        self.vocab = vocab.Vocab.load(self.vocab_path)
        observed_productions = json.load(open(self.observed_productions_path))
        self.sum_type_constructors = observed_productions['sum_type_constructors']
        self.field_presence_infos = observed_productions['field_presence_infos']
        self.seq_lengths = observed_productions['seq_lengths']
        self.primitive_types = observed_productions['primitive_types']
        grammar = json.load(open(self.grammar_rules_path))
        # JSON has no tuples; rules were saved as lists and must be re-tuplified.
        self.all_rules = serialization.tuplify(grammar['all_rules'])
        self.rules_mask = grammar['rules_mask']

    def dataset(self, section):
        """Load the saved items of *section* from its .jsonl file."""
        return [
            NL2CodeDecoderPreprocItem(**json.loads(line))
            for line in open(os.path.join(self.data_dir, section + '.jsonl'))
        ]

    def _record_productions(self, tree):
        """Walk *tree* and record every grammar production it exercises."""
        queue = [(tree, False)]
        while queue:
            node, is_seq_elem = queue.pop()
            node_type = node['_type']
            # Rules of the form:
            # expr -> Attribute | Await | BinOp | BoolOp | ...
            # expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
            for type_name in [node_type] + node.get('_extra_types', []):
                if type_name in self.ast_wrapper.constructors:
                    sum_type_name = self.ast_wrapper.constructor_to_sum_type[type_name]
                    if is_seq_elem and self.use_seq_elem_rules:
                        self.sum_type_constructors[
                            sum_type_name
                            + '_seq_elem'
                        ].add(type_name)
                    else:
                        self.sum_type_constructors[sum_type_name].add(
                            type_name,
                        )
            # Rules of the form:
            # FunctionDef
            # -> identifier name, arguments args
            # |  identifier name, arguments args, stmt* body
            # |  identifier name, arguments args, expr* decorator_list
            # |  identifier name, arguments args, expr? returns
            # ...
            # |  identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
            assert node_type in self.ast_wrapper.singular_types
            field_presence_info = get_field_presence_info(
                self.ast_wrapper,
                node,
                self.ast_wrapper.singular_types[node_type].fields,
            )
            self.field_presence_infos[node_type].add(field_presence_info)
            for field_info in self.ast_wrapper.singular_types[node_type].fields:
                field_value = node.get(
                    field_info.name, [] if field_info.seq else None,
                )
                to_enqueue = []
                if field_info.seq:
                    # Rules of the form:
                    # stmt* -> stmt
                    # |        stmt stmt
                    # |        stmt stmt stmt
                    self.seq_lengths[
                        field_info.type
                        + '*'
                    ].add(len(field_value))
                    to_enqueue = field_value
                else:
                    to_enqueue = [field_value]
                for child in to_enqueue:
                    if isinstance(child, collections.abc.Mapping) and '_type' in child:
                        queue.append((child, field_info.seq))
                    else:
                        # Leaf value: record its Python type name as a primitive.
                        self.primitive_types.add(type(child).__name__)

    def _calculate_rules(self):
        """Number all observed productions; return (all_rules, rules_mask).

        rules_mask maps each LHS to the [start, end) index range of its
        rules within the flat all_rules tuple.
        """
        offset = 0
        all_rules = []
        rules_mask = {}
        # Rules of the form:
        # expr -> Attribute | Await | BinOp | BoolOp | ...
        # expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
        for parent, children in sorted(self.sum_type_constructors.items()):
            assert not isinstance(children, set)
            rules_mask[parent] = (offset, offset + len(children))
            offset += len(children)
            all_rules += [(parent, child) for child in children]
        # Rules of the form:
        # FunctionDef
        # -> identifier name, arguments args
        # |  identifier name, arguments args, stmt* body
        # |  identifier name, arguments args, expr* decorator_list
        # |  identifier name, arguments args, expr? returns
        # ...
        # |  identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
        for name, field_presence_infos in sorted(self.field_presence_infos.items()):
            assert not isinstance(field_presence_infos, set)
            rules_mask[name] = (offset, offset + len(field_presence_infos))
            offset += len(field_presence_infos)
            all_rules += [
                (name, presence)
                for presence in field_presence_infos
            ]
        # Rules of the form:
        # stmt* -> stmt
        # |        stmt stmt
        # |        stmt stmt stmt
        for seq_type_name, lengths in sorted(self.seq_lengths.items()):
            assert not isinstance(lengths, set)
            rules_mask[seq_type_name] = (offset, offset + len(lengths))
            offset += len(lengths)
            all_rules += [(seq_type_name, i) for i in lengths]
        return tuple(all_rules), rules_mask

    def _all_tokens(self, root):
        """Yield every terminal token in the tree (pointer fields are skipped)."""
        queue = [root]
        while queue:
            node = queue.pop()
            type_info = self.ast_wrapper.singular_types[node['_type']]
            for field_info in reversed(type_info.fields):
                field_value = node.get(field_info.name)
                if field_info.type in self.grammar.pointers:
                    # Pointer fields are resolved by copying, not generated as tokens.
                    pass
                elif field_info.type in self.ast_wrapper.primitive_types:
                    yield from self.grammar.tokenize_field_value(field_value)
                elif isinstance(field_value, (list, tuple)):
                    queue.extend(field_value)
                elif field_value is not None:
                    queue.append(field_value)
@attr.s
class TreeState:
    """A (node, parent field type) pair used while traversing the output tree."""
    # The AST node currently being expanded.
    node = attr.ib()
    # Grammar type of the field in the parent that holds this node.
    parent_field_type = attr.ib()
@registry.register('decoder', 'NL2Code')
class NL2CodeDecoder(torch.nn.Module):
Preproc = NL2CodeDecoderPreproc
    def __init__(
            self,
            device,
            preproc,
            #
            rule_emb_size=128,
            node_embed_size=64,
            # TODO: This should be automatically inferred from encoder
            enc_recurrent_size=256,
            recurrent_size=256,
            dropout=0.,
            desc_attn='bahdanau',
            copy_pointer=None,
            multi_loss_type='logsumexp',
            sup_att=None,
            use_align_mat=False,
            use_align_loss=False,
            enumerate_order=False,
            loss_type='softmax',
    ):
        """Build all decoder sub-modules from the preprocessed grammar/vocab.

        desc_attn selects the description attention ('bahdanau', 'mha',
        'mha-1h', 'sep', or a pre-built module); loss_type selects the
        per-step classification loss; use_align_mat/use_align_loss enable
        the Spider alignment-matrix variants.
        """
        super().__init__()
        self._device = device
        self.preproc = preproc
        self.ast_wrapper = preproc.ast_wrapper
        self.terminal_vocab = preproc.vocab
        self.rule_emb_size = rule_emb_size
        self.node_emb_size = node_embed_size
        self.enc_recurrent_size = enc_recurrent_size
        self.recurrent_size = recurrent_size
        # Map each grammar rule to its index in the rule softmax.
        self.rules_index = {
            v: idx for idx,
            v in enumerate(self.preproc.all_rules)
        }
        self.use_align_mat = use_align_mat
        self.use_align_loss = use_align_loss
        self.enumerate_order = enumerate_order
        if use_align_mat:
            # Imported lazily to avoid a hard dependency when alignment is unused.
            from ratsql.models.spider import spider_dec_func
            self.compute_align_loss = lambda *args: \
                spider_dec_func.compute_align_loss(self, *args)
            self.compute_pointer_with_align = lambda *args: \
                spider_dec_func.compute_pointer_with_align(self, *args)
        # Vocabulary over node types; the *_seq_elem variant mirrors the
        # rule set recorded by the preprocessor.
        if self.preproc.use_seq_elem_rules:
            self.node_type_vocab = vocab.Vocab(
                sorted(self.preproc.primitive_types)
                + sorted(self.ast_wrapper.custom_primitive_types)
                + sorted(self.preproc.sum_type_constructors.keys())
                + sorted(self.preproc.field_presence_infos.keys())
                + sorted(self.preproc.seq_lengths.keys()),
                special_elems=(),
            )
        else:
            self.node_type_vocab = vocab.Vocab(
                sorted(self.preproc.primitive_types)
                + sorted(self.ast_wrapper.custom_primitive_types)
                + sorted(self.ast_wrapper.sum_types.keys())
                + sorted(self.ast_wrapper.singular_types.keys())
                + sorted(self.preproc.seq_lengths.keys()),
                special_elems=(),
            )
        # Decoder LSTM cell; its input concatenates previous action, parent
        # action, attention context, parent state and node-type embeddings.
        self.state_update = variational_lstm.RecurrentDropoutLSTMCell(
            input_size=self.rule_emb_size * 2 + self.enc_recurrent_size
            + self.recurrent_size + self.node_emb_size,
            hidden_size=self.recurrent_size,
            dropout=dropout,
        )
        self.attn_type = desc_attn
        if desc_attn == 'bahdanau':
            self.desc_attn = attention.BahdanauAttention(
                query_size=self.recurrent_size,
                value_size=self.enc_recurrent_size,
                proj_size=50,
            )
        elif desc_attn == 'mha':
            self.desc_attn = attention.MultiHeadedAttention(
                h=8,
                query_size=self.recurrent_size,
                value_size=self.enc_recurrent_size,
            )
        elif desc_attn == 'mha-1h':
            self.desc_attn = attention.MultiHeadedAttention(
                h=1,
                query_size=self.recurrent_size,
                value_size=self.enc_recurrent_size,
            )
        elif desc_attn == 'sep':
            # Separate attention heads over the question and the schema.
            self.question_attn = attention.MultiHeadedAttention(
                h=1,
                query_size=self.recurrent_size,
                value_size=self.enc_recurrent_size,
            )
            self.schema_attn = attention.MultiHeadedAttention(
                h=1,
                query_size=self.recurrent_size,
                value_size=self.enc_recurrent_size,
            )
        else:
            # TODO: Figure out how to get right sizes (query, value) to module
            self.desc_attn = desc_attn
        self.sup_att = sup_att
        # Scores over grammar rules at each APPLY_RULE step.
        self.rule_logits = torch.nn.Sequential(
            torch.nn.Linear(self.recurrent_size, self.rule_emb_size),
            torch.nn.Tanh(),
            torch.nn.Linear(self.rule_emb_size, len(self.rules_index)),
        )
        self.rule_embedding = torch.nn.Embedding(
            num_embeddings=len(self.rules_index),
            embedding_dim=self.rule_emb_size,
        )
        # Log-odds of generating a terminal from the vocab vs. copying it.
        self.gen_logodds = torch.nn.Linear(self.recurrent_size, 1)
        self.terminal_logits = torch.nn.Sequential(
            torch.nn.Linear(self.recurrent_size, self.rule_emb_size),
            torch.nn.Tanh(),
            torch.nn.Linear(self.rule_emb_size, len(self.terminal_vocab)),
        )
        self.terminal_embedding = torch.nn.Embedding(
            num_embeddings=len(self.terminal_vocab),
            embedding_dim=self.rule_emb_size,
        )
        if copy_pointer is None:
            self.copy_pointer = attention.BahdanauPointer(
                query_size=self.recurrent_size,
                key_size=self.enc_recurrent_size,
                proj_size=50,
            )
        else:
            # TODO: Figure out how to get right sizes (query, key) to module
            self.copy_pointer = copy_pointer
        # Reduction over the multiple ways a token can be produced.
        if multi_loss_type == 'logsumexp':
            self.multi_loss_reduction = lambda logprobs: - \
                torch.logsumexp(logprobs, dim=1)
        elif multi_loss_type == 'mean':
            self.multi_loss_reduction = lambda logprobs: - \
                torch.mean(logprobs, dim=1)
        # One pointer network (and action-embedding projection) per pointer type.
        self.pointers = torch.nn.ModuleDict()
        self.pointer_action_emb_proj = torch.nn.ModuleDict()
        for pointer_type in self.preproc.grammar.pointers:
            self.pointers[pointer_type] = attention.ScaledDotProductPointer(
                query_size=self.recurrent_size,
                key_size=self.enc_recurrent_size,
            )
            self.pointer_action_emb_proj[pointer_type] = torch.nn.Linear(
                self.enc_recurrent_size, self.rule_emb_size,
            )
        self.node_type_embedding = torch.nn.Embedding(
            num_embeddings=len(self.node_type_vocab),
            embedding_dim=self.node_emb_size,
        )
        # TODO batching
        self.zero_rule_emb = torch.zeros(
            1, self.rule_emb_size, device=self._device,
        )
        self.zero_recurrent_emb = torch.zeros(
            1, self.recurrent_size, device=self._device,
        )
        if loss_type == 'softmax':
            self.xent_loss = torch.nn.CrossEntropyLoss(reduction='none')
        elif loss_type == 'entmax':
            self.xent_loss = entmax.entmax15_loss
        elif loss_type == 'sparsemax':
            self.xent_loss = entmax.sparsemax_loss
        elif loss_type == 'label_smooth':
            self.xent_loss = self.label_smooth_loss
def label_smooth_loss(self, X, target, smooth_value=0.1):
    """Cross-entropy with label smoothing while training; plain CE otherwise.

    X: logits, batch (=1) x num_classes. target: LongTensor of gold indices.
    Returns a 1-element loss tensor in both branches.
    """
    if not self.training:
        # Evaluation path: unsmoothed per-example cross-entropy.
        return torch.nn.functional.cross_entropy(X, target, reduction='none')
    num_classes = X.size()[1]
    log_probs = torch.log_softmax(X, dim=1)
    # Smoothed target distribution: smooth_value spread uniformly over the
    # non-gold classes, 1 - smooth_value on the gold class.
    smoothed = torch.full(
        X.size(), smooth_value
        / (num_classes - 1),
    ).to(X.device)
    # NOTE(review): scatter index is shaped (1, batch); this is only correct
    # for batch == 1, matching the "batch (=1)" convention of this decoder.
    smoothed.scatter_(1, target.unsqueeze(0), 1 - smooth_value)
    kl = F.kl_div(log_probs, smoothed, reduction='batchmean')
    return kl.unsqueeze(0)
@classmethod
def _calculate_rules(cls, preproc):
    """Enumerate all grammar rules from the preprocessed grammar.

    Returns (all_rules, rules_mask) where all_rules is a flat list of
    (lhs, rhs) pairs and rules_mask maps each LHS to its contiguous
    [start, end) index range inside all_rules.
    """
    all_rules = []
    rules_mask = {}

    def register(lhs, rhs_items):
        # Each LHS owns one contiguous slice of all_rules; an LHS may only
        # be registered once.
        assert lhs not in rules_mask
        start = len(all_rules)
        all_rules.extend((lhs, rhs) for rhs in rhs_items)
        rules_mask[lhs] = (start, len(all_rules))

    # Rules of the form:
    #   expr -> Attribute | Await | BinOp | BoolOp | ...
    #   expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
    for parent, children in sorted(preproc.sum_type_constructors.items()):
        register(parent, children)

    # Rules of the form:
    #   FunctionDef -> one field-presence pattern per alternative, e.g.
    #   identifier name, arguments args, stmt* body, expr* decorator_list, ...
    for name, field_presence_infos in sorted(preproc.field_presence_infos.items()):
        register(name, field_presence_infos)

    # Rules of the form:
    #   stmt* -> stmt | stmt stmt | stmt stmt stmt | ...
    for seq_type_name, lengths in sorted(preproc.seq_lengths.items()):
        register(seq_type_name, lengths)

    return all_rules, rules_mask
def compute_loss(self, enc_input, example, desc_enc, debug):
    """Total training loss for one example.

    Uses the order-marginalized loss when enumerate_order is on during
    training, plain teacher-forced MLE otherwise; optionally adds the
    auxiliary alignment term (see compute_align_loss).
    """
    if self.enumerate_order and self.training:
        loss_fn = self.compute_loss_from_all_ordering
    else:
        loss_fn = self.compute_mle_loss
    loss = loss_fn(enc_input, example, desc_enc, debug)
    if self.use_align_loss:
        loss = loss + self.compute_align_loss(desc_enc, example)
    return loss
def compute_loss_from_all_ordering(self, enc_input, example, desc_enc, debug):
    """MLE loss marginalized over all orderings of list-valued AST fields.

    Enumerates every permutation of every list/tuple node in example.tree,
    computes the MLE loss for each permuted tree, and combines them with
    logsumexp (the per-tree losses are negative log-probabilities).
    """
    def get_permutations(node):
        # Collect, in DFS order, the candidate index permutations for every
        # list/tuple node in the tree.
        def traverse_tree(node):
            nonlocal permutations
            if isinstance(node, (list, tuple)):
                p = itertools.permutations(range(len(node)))
                permutations.append(list(p))
                for child in node:
                    traverse_tree(child)
            elif isinstance(node, dict):
                for node_name in node:
                    traverse_tree(node[node_name])
        permutations = []
        traverse_tree(node)
        return permutations

    def get_perturbed_tree(node, permutation):
        # Apply one permutation choice per list node, consuming `permutation`
        # front-to-back in the same DFS order used by get_permutations.
        def traverse_tree(node, parent_type, parent_node):
            if isinstance(node, (list, tuple)):
                nonlocal permutation
                p_node = [node[i] for i in permutation[0]]
                parent_node[parent_type] = p_node
                permutation = permutation[1:]
                for child in node:
                    traverse_tree(child, None, None)
            elif isinstance(node, dict):
                for node_name in node:
                    traverse_tree(node[node_name], node_name, node)
        # Deep copy first so the caller's tree is never mutated in place.
        node = copy.deepcopy(node)
        traverse_tree(node, None, None)
        return node

    orig_tree = example.tree
    permutations = get_permutations(orig_tree)
    # Cartesian product: one permutation choice for every list node.
    products = itertools.product(*permutations)
    loss_list = []
    for product in products:
        tree = get_perturbed_tree(orig_tree, product)
        example.tree = tree
        loss = self.compute_mle_loss(enc_input, example, desc_enc)
        loss_list.append(loss)
    # Restore the original tree on the (shared) example object.
    example.tree = orig_tree
    loss_v = torch.stack(loss_list, 0)
    return torch.logsumexp(loss_v, 0)
def compute_mle_loss(self, enc_input, example, desc_enc, debug=False):
    """Teacher-forced MLE loss for decoding example.tree.

    Walks the gold AST with an explicit stack (children pushed reversed so
    they pop in left-to-right order) and feeds each gold decision into a
    TrainTreeTraversal, asserting at every step that the traversal's state
    machine expects that kind of decision. Returns the summed loss, plus
    the traversal history when debug is set.
    """
    traversal = TrainTreeTraversal(self, desc_enc, debug)
    traversal.step(None)
    queue = [
        TreeState(
            node=example.tree,
            parent_field_type=self.preproc.grammar.root_type,
        ),
    ]
    while queue:
        item = queue.pop()
        node = item.node
        parent_field_type = item.parent_field_type

        if isinstance(node, (list, tuple)):
            # Sequence node: first apply the length rule, then push elements.
            node_type = parent_field_type + '*'
            rule = (node_type, len(node))
            rule_idx = self.rules_index[rule]
            assert traversal.cur_item.state == TreeTraversal.State.LIST_LENGTH_APPLY
            traversal.step(rule_idx)

            if self.preproc.use_seq_elem_rules and parent_field_type in self.ast_wrapper.sum_types:
                parent_field_type += '_seq_elem'

            for i, elem in reversed(list(enumerate(node))):
                queue.append(
                    TreeState(
                        node=elem,
                        parent_field_type=parent_field_type,
                    ),
                )
            continue

        if parent_field_type in self.preproc.grammar.pointers:
            # Pointer node: `node` is an index into the pointer candidates.
            assert isinstance(node, int)
            assert traversal.cur_item.state == TreeTraversal.State.POINTER_APPLY
            pointer_map = desc_enc.pointer_maps.get(parent_field_type)
            if pointer_map:
                values = pointer_map[node]
                if self.sup_att == '1h':
                    # Supervised attention: offset `node` into the flat
                    # description depending on which section it points into.
                    if len(pointer_map) == len(enc_input['columns']):
                        if self.attn_type != 'sep':
                            traversal.step(
                                values[0], values[1:], node
                                + len(enc_input['question']),
                            )
                        else:
                            traversal.step(values[0], values[1:], node)
                    else:
                        if self.attn_type != 'sep':
                            traversal.step(
                                values[0], values[1:],
                                node + len(enc_input['question'])
                                + len(enc_input['columns']),
                            )
                        else:
                            traversal.step(
                                values[0], values[1:], node
                                + len(enc_input['columns']),
                            )
                else:
                    traversal.step(values[0], values[1:])
            else:
                traversal.step(node)
            continue

        if parent_field_type in self.ast_wrapper.primitive_types:
            # identifier, int, string, bytes, object, singleton
            # - could be bytes, str, int, float, bool, NoneType
            # - terminal tokens vocabulary is created by turning everything into a string (with `str`)
            # - at decoding time, cast back to str/int/float/bool
            field_value_split = self.preproc.grammar.tokenize_field_value(node) + [
                vocab.EOS,
            ]
            for token in field_value_split:
                assert traversal.cur_item.state == TreeTraversal.State.GEN_TOKEN
                traversal.step(token)
            continue

        type_info = self.ast_wrapper.singular_types[node['_type']]

        if parent_field_type in self.preproc.sum_type_constructors:
            # ApplyRule, like expr -> Call
            rule = (parent_field_type, type_info.name)
            rule_idx = self.rules_index[rule]
            assert traversal.cur_item.state == TreeTraversal.State.SUM_TYPE_APPLY
            extra_rules = [
                self.rules_index[parent_field_type, extra_type]
                for extra_type in node.get('_extra_types', [])
            ]
            traversal.step(rule_idx, extra_rules)

        if type_info.fields:
            # ApplyRule, like Call -> expr[func] expr*[args] keyword*[keywords]
            # Figure out which rule needs to be applied
            present = get_field_presence_info(
                self.ast_wrapper, node, type_info.fields,
            )
            rule = (node['_type'], tuple(present))
            rule_idx = self.rules_index[rule]
            assert traversal.cur_item.state == TreeTraversal.State.CHILDREN_APPLY
            traversal.step(rule_idx)

        # reversed so that we perform a DFS in left-to-right order
        for field_info in reversed(type_info.fields):
            if field_info.name not in node:
                continue
            queue.append(
                TreeState(
                    node=node[field_info.name],
                    parent_field_type=field_info.type,
                ),
            )

    loss = torch.sum(torch.stack(tuple(traversal.loss), dim=0), dim=0)
    if debug:
        return loss, [attr.asdict(entry) for entry in traversal.history]
    else:
        return loss
def begin_inference(self, desc_enc, example):
    """Start beam-free decoding: build an inference traversal and take the
    initial step. Returns (traversal, choices) where `choices` are the
    candidates for the first decision."""
    traversal = InferenceTreeTraversal(self, desc_enc, example)
    initial_choices = traversal.step(None)
    return traversal, initial_choices
def _desc_attention(self, prev_state, desc_enc):
    """Attend over the encoded description using the decoder state.

    prev_state: (h_n, c_n), each batch (=1) x emb_size.
    Returns (context, attention_logits).
    """
    query = prev_state[0]  # use h_n as the attention query
    if self.attn_type == 'sep':
        # Separate attention over question tokens and schema items.
        q_ctx, _q_logits = self.question_attn(
            query, desc_enc.question_memory,
        )
        s_ctx, s_logits = self.schema_attn(
            query, desc_enc.schema_memory,
        )
        # NOTE: only the schema attention logits are returned to the caller.
        return q_ctx + s_ctx, s_logits
    return self.desc_attn(query, desc_enc.memory, attn_mask=None)
def _tensor(self, data, dtype=None):
    """Build a tensor from `data` on this model's device."""
    return torch.tensor(data, dtype=dtype, device=self._device)
def _index(self, vocab, word):
    """Look up `word` in `vocab` and return its index as a 1-element tensor."""
    return self._tensor([vocab.index(word)])
def _update_state(
    self,
    node_type,
    prev_state,
    prev_action_emb,
    parent_h,
    parent_action_emb,
    desc_enc,
):
    """One recurrent decoder update.

    Returns (new_state, attention_logits), where new_state comes from
    self.state_update applied to the concatenated step features.
    """
    # Attention context over the description; batch (=1) x emb_size.
    context, attention_logits = self._desc_attention(prev_state, desc_enc)
    # Embedding of the node type being expanded; batch (=1) x node_emb_size.
    type_emb = self.node_type_embedding(
        self._index(self.node_type_vocab, node_type),
    )
    # [a_{t-1}; c_t; s_{p_t}; a_{p_t}; n_{f_t}] along the feature dimension.
    rnn_input = torch.cat(
        [
            prev_action_emb,    # a_{t-1}: rule_emb_size
            context,            # c_t: enc_recurrent_size
            parent_h,           # s_{p_t}: recurrent_size
            parent_action_emb,  # a_{p_t}: rule_emb_size
            type_emb,           # n_{f_t}: node_emb_size
        ],
        dim=-1,
    )
    new_state = self.state_update(rnn_input, prev_state)
    return new_state, attention_logits
def apply_rule(
    self,
    node_type,
    prev_state,
    prev_action_emb,
    parent_h,
    parent_action_emb,
    desc_enc,
):
    """Advance the decoder state and score all grammar rules.

    Returns (output, new_state, rule_logits); attention logits from the
    state update are discarded here.
    """
    new_state, _attention_logits = self._update_state(
        node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc,
    )
    output = new_state[0]  # hidden state h; batch (=1) x emb_size
    return output, new_state, self.rule_logits(output)
def rule_infer(self, node_type, rule_logits):
    """Return (rule_id, logprob) pairs for rules whose LHS is node_type.

    The softmax is taken over ALL rules; only the slice belonging to
    node_type (per preproc.rules_mask) is returned.
    """
    logprobs = torch.nn.functional.log_softmax(rule_logits, dim=-1)
    start, end = self.preproc.rules_mask[node_type]
    # TODO: Mask other probabilities first?
    return [(idx, logprobs[0, idx]) for idx in range(start, end)]
def gen_token(
    self,
    node_type,
    prev_state,
    prev_action_emb,
    parent_h,
    parent_action_emb,
    desc_enc,
):
    """Advance the decoder state and compute the copy/generate log-odds.

    Returns (new_state, output, gen_logodds) — note the ordering differs
    from apply_rule, which returns output first.
    """
    new_state, _attention_logits = self._update_state(
        node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc,
    )
    output = new_state[0]  # hidden state h; batch (=1) x emb_size
    # Log-odds of generating from the vocabulary vs. copying; shape: batch (=1).
    gen_logodds = self.gen_logodds(output).squeeze(1)
    return new_state, output, gen_logodds
def gen_token_loss(
    self,
    output,
    gen_logodds,
    token,
    desc_enc,
):
    """Loss for emitting `token`, marginalizing over copy-vs-generate.

    Returns -log p(token), where p(token) sums the probability of copying
    the token from the description (when it occurs there) and of generating
    it from the terminal vocabulary (when it is in-vocabulary, or as the
    only option when no copy is possible).
    """
    # token_idx shape: batch (=1), LongTensor
    token_idx = self._index(self.terminal_vocab, token)
    # action_emb shape: batch (=1) x emb_size
    # Which paths can produce the token:
    # +unk, +in desc: copy
    # +unk, -in desc: gen (an unk token)
    # -unk, +in desc: copy, gen
    # -unk, -in desc: gen
    # gen_logodds shape: batch (=1)
    desc_locs = desc_enc.find_word_occurrences(token)
    if desc_locs:
        # copy: if the token appears in the description at least once
        # copy_loc_logits shape: batch (=1) x desc length
        copy_loc_logits = self.copy_pointer(output, desc_enc.memory)
        copy_logprob = (
            # log p(copy | output)
            # shape: batch (=1)
            torch.nn.functional.logsigmoid(-gen_logodds)
            # xent_loss: -log p(location | output)
            # TODO: sum the probability of all occurrences
            # shape: batch (=1)
            - self.xent_loss(copy_loc_logits, self._tensor(desc_locs[0:1]))
        )
    else:
        copy_logprob = None

    # gen: ~(unk & in desc), equivalent to ~unk | ~in desc
    if token in self.terminal_vocab or copy_logprob is None:
        token_logits = self.terminal_logits(output)
        # shape: batch (=1)
        gen_logprob = (
            # log p(gen | output)
            # shape: batch (=1)
            torch.nn.functional.logsigmoid(gen_logodds)
            # xent_loss: -log p(token | output)
            # shape: batch (=1)
            - self.xent_loss(token_logits, token_idx)
        )
    else:
        gen_logprob = None

    # loss should be -log p(...), so negate the logsumexp over the
    # available paths (maybe_stack drops the None entries).
    loss_piece = -torch.logsumexp(
        maybe_stack([copy_logprob, gen_logprob], dim=1),
        dim=1,
    )
    return loss_piece
def token_infer(self, output, gen_logodds, desc_enc):
    """Score every candidate next token at inference time.

    Combines, in log space, the joint probability of copying each
    description word and of generating each vocabulary word; returns a
    list of (token, logprob) pairs.
    """
    # Copy tokens
    # log p(copy | output)
    # shape: batch (=1)
    copy_logprob = torch.nn.functional.logsigmoid(-gen_logodds)
    copy_loc_logits = self.copy_pointer(output, desc_enc.memory)
    # log p(loc_i | copy, output)
    # shape: batch (=1) x seq length
    copy_loc_logprobs = torch.nn.functional.log_softmax(
        copy_loc_logits, dim=-1,
    )
    # log p(loc_i, copy | output)
    copy_loc_logprobs += copy_logprob
    log_prob_by_word = {}
    # accumulate_logprobs is needed because the same word may appear
    # multiple times in desc_enc.words.
    accumulate_logprobs(
        log_prob_by_word,
        zip(desc_enc.words, copy_loc_logprobs.squeeze(0)),
    )

    # Generate tokens
    # log p(~copy | output)
    # shape: batch (=1)
    gen_logprob = torch.nn.functional.logsigmoid(gen_logodds)
    token_logits = self.terminal_logits(output)
    # log p(v | ~copy, output)
    # shape: batch (=1) x vocab size
    token_logprobs = torch.nn.functional.log_softmax(token_logits, dim=-1)
    # log p(v, ~copy | output)
    # shape: batch (=1) x vocab size
    token_logprobs += gen_logprob
    accumulate_logprobs(
        log_prob_by_word,
        ((self.terminal_vocab[idx], token_logprobs[0, idx])
         for idx in range(token_logprobs.shape[1])),
    )
    return list(log_prob_by_word.items())
def compute_pointer(
    self,
    node_type,
    prev_state,
    prev_action_emb,
    parent_h,
    parent_action_emb,
    desc_enc,
):
    """Advance the decoder state and score pointer candidates for node_type.

    Returns (output, new_state, pointer_logits, attention_logits).
    """
    new_state, attention_logits = self._update_state(
        node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc,
    )
    output = new_state[0]  # hidden state h; batch (=1) x emb_size
    # Score each candidate in the pointer memory for this node type.
    pointer_module = self.pointers[node_type]
    pointer_logits = pointer_module(output, desc_enc.pointer_memories[node_type])
    return output, new_state, pointer_logits, attention_logits
def pointer_infer(self, node_type, logits):
    """Return (candidate_index, logprob) pairs for every pointer candidate."""
    logprobs = torch.nn.functional.log_softmax(logits, dim=-1)
    # TODO batching
    return [(i, logprobs[0, i]) for i in range(logits.shape[1])]
| 38.057576 | 106 | 0.566022 |
dd00373802cecaad52bc54c36933fc5d0db77f21 | 941 | py | Python | wherenottoeat/eatery/migrations/0005_auto_20161113_1803.py | jrigden/wherenottoeat.net | 67424b8795e030fa29b8a995c735e5cddc3f166f | [
"MIT"
] | null | null | null | wherenottoeat/eatery/migrations/0005_auto_20161113_1803.py | jrigden/wherenottoeat.net | 67424b8795e030fa29b8a995c735e5cddc3f166f | [
"MIT"
] | null | null | null | wherenottoeat/eatery/migrations/0005_auto_20161113_1803.py | jrigden/wherenottoeat.net | 67424b8795e030fa29b8a995c735e5cddc3f166f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-13 18:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: sets max_length=75 on four CharFields of the
    # `restaurant` model. Do not edit by hand except to keep history linear.

    dependencies = [
        ('eatery', '0004_auto_20161113_1744'),
    ]

    operations = [
        migrations.AlterField(
            model_name='restaurant',
            name='Address',
            field=models.CharField(max_length=75),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='Description',
            field=models.CharField(max_length=75),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='Name',
            field=models.CharField(max_length=75),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='Program_Identifier',
            field=models.CharField(max_length=75),
        ),
    ]
| 26.138889 | 50 | 0.577046 |
4147aab5f0d11ff0f2c52c82ec4bb3ba4caa12f5 | 113 | py | Python | tf_implementation/segmentation/metrics/segmentation.py | arekmula/skull_stripping | d03cef81392f8cd243dc1c6d32ffa897af922eb2 | [
"MIT"
] | 3 | 2021-02-23T15:26:40.000Z | 2021-08-11T19:36:21.000Z | tf_implementation/segmentation/metrics/segmentation.py | arekmula/skull_stripping | d03cef81392f8cd243dc1c6d32ffa897af922eb2 | [
"MIT"
] | null | null | null | tf_implementation/segmentation/metrics/segmentation.py | arekmula/skull_stripping | d03cef81392f8cd243dc1c6d32ffa897af922eb2 | [
"MIT"
] | null | null | null | import segmentation_models as sm
def f_score(threshold=0.5):
    """Build a segmentation_models F-score metric with the given
    binarization threshold."""
    metric = sm.metrics.FScore(threshold=threshold)
    return metric
| 18.833333 | 49 | 0.787611 |
c7de397dafe13afff433057924d2887a4133a76a | 44 | py | Python | enthought/kiva/constants.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/kiva/constants.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/kiva/constants.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from kiva.constants import *
| 14.666667 | 28 | 0.772727 |
0253594d73091b2f09814bcc6a90f24a9be03c48 | 501 | py | Python | components/markers/lambdaProblemMarker.py | Linzee/tmsei_doodle | b08580fe5d58f6dd614ba6f3f53a03bcd859277b | [
"MIT"
] | null | null | null | components/markers/lambdaProblemMarker.py | Linzee/tmsei_doodle | b08580fe5d58f6dd614ba6f3f53a03bcd859277b | [
"MIT"
] | null | null | null | components/markers/lambdaProblemMarker.py | Linzee/tmsei_doodle | b08580fe5d58f6dd614ba6f3f53a03bcd859277b | [
"MIT"
] | null | null | null | import pandas as pd
from components.flowUtils import annotateProgress, cached
class LambdaProblemMarker:
    """Marker that scores every problem with a caller-supplied function."""

    def __init__(self, flow, lambdaMarker=None):
        # lambdaMarker: callable taking a problem and returning its marker.
        self.lambdaMarker = lambdaMarker
        self.similarityMatrix = flow.getSimilarityMatrix()
        self.problems = flow.getProblems()

    @annotateProgress
    @cached
    def getMarkers(self):
        """Apply lambdaMarker to each problem, indexed by problem id."""
        problem_ids = list(self.similarityMatrix)
        series = pd.Series(problem_ids, problem_ids)
        return series.apply(lambda pid: self.lambdaMarker(self.problems[pid]))
| 31.3125 | 139 | 0.738523 |
cd1ee076814bc0d59a4fb5a40c7385f5d61038a4 | 2,255 | py | Python | Dorta/accounting_modification/models/account_invoice_modify.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | Dorta/accounting_modification/models/account_invoice_modify.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | Dorta/accounting_modification/models/account_invoice_modify.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class AccountInvoiceModification(models.Model):
    """Invoice extensions: expose the sales-team leader and default the
    invoice comment from the partner's stored note."""
    _inherit = "account.invoice"

    # Leader of the invoice's sales team (related, read-only field).
    sales_team_user_id = fields.Many2one('res.users', string="Líder de Equipo", related="team_id.user_id")

    # (kept from upstream, currently disabled)
    # @api.multi
    # def _get_report_base_filename(self):
    #     self.ensure_one()
    #     return self.type == 'out_invoice' and self.state == 'draft' and _('Draft Amending Invoice') or \
    #            self.type == 'out_invoice' and self.state in ('open', 'in_payment', 'paid') and _('Invoice - %s') % (self.number) or \
    #            self.type == 'out_refund' and self.state == 'draft' and _('Credit Note') or \
    #            self.type == 'out_refund' and _('Credit Note - %s') % (self.number) or \
    #            self.type == 'in_invoice' and self.state == 'draft' and _('Vendor Bill') or \
    #            self.type == 'in_invoice' and self.state in ('open', 'in_payment', 'paid') and _('Vendor Bill - %s') % (self.number) or \
    #            self.type == 'in_refund' and self.state == 'draft' and _('Vendor Credit Note') or \
    #            self.type == 'in_refund' and _('Vendor Credit Note - %s') % (self.number)

    @api.onchange('partner_id', 'company_id')
    def _onchange_partner_id(self):
        """After the standard onchange, copy the partner's invoice note
        (if any) into the comment field, else clear it."""
        res = super(AccountInvoiceModification, self)._onchange_partner_id()
        note = self.partner_id.note_for_invoice if self.partner_id else False
        self.comment = note if note else ''
        return res
class AccountPayment(models.Model):
    # Re-labels the payment workflow states (Spanish labels) and adds an
    # explicit payment-date field defaulting to today.
    _inherit = 'account.payment'

    # Workflow status; selection keys must match the upstream account.payment states.
    state = fields.Selection([('draft', 'Borrador'), ('posted', 'Pendiente'), ('sent', 'Enviado'), ('reconciled', 'Reconciliado'),
                              ('cancelled', 'Cancelado')], readonly=True, default='draft', copy=False, string="Status")
    # Payment date, defaulting to "today" in the user's timezone context.
    entry_date = fields.Date(string="Fecha de Pago", default=lambda self: fields.Date.context_today(self),
                             translate=True)

    # (kept from upstream, currently disabled)
    # @api.model
    # def default_get(self, fields):
    #     res = super(AccountPayment, self).default_get(fields)
    #     res['entry_date'] = fields.Date.today()
    #     return res
| 47.978723 | 130 | 0.594678 |
9317b2ec943d3c5f7f57e6f0698a068209046ba6 | 8,079 | py | Python | esphome/components/font/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/font/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/font/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import functools
from pathlib import Path
import hashlib
import re
import requests
from esphome import core
from esphome.components import display
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import (
CONF_FAMILY,
CONF_FILE,
CONF_GLYPHS,
CONF_ID,
CONF_RAW_DATA_ID,
CONF_TYPE,
CONF_SIZE,
CONF_PATH,
CONF_WEIGHT,
)
from esphome.core import CORE, HexInt
DOMAIN = "font"
DEPENDENCIES = ["display"]
MULTI_CONF = True
Font = display.display_ns.class_("Font")
Glyph = display.display_ns.class_("Glyph")
GlyphData = display.display_ns.struct("GlyphData")
def validate_glyphs(value):
    """Validate the glyph list: all entries are strings, sorted by their
    UTF-8 byte order; duplicate glyphs are rejected."""
    if isinstance(value, list):
        value = cv.Schema([cv.string])(value)
    value = cv.Schema([cv.string])(list(value))

    def compare_glyphs(left, right):
        # Order by raw UTF-8 bytes; two equal glyphs are a config error.
        left_bytes = left.encode("utf-8")
        right_bytes = right.encode("utf-8")
        if left_bytes == right_bytes:
            raise cv.Invalid(f"Found duplicate glyph {left}")
        return -1 if left_bytes < right_bytes else 1

    value.sort(key=functools.cmp_to_key(compare_glyphs))
    return value
def validate_pillow_installed(value):
    """Config validator: ensure Pillow >= 4.0 is importable.

    Passes `value` through unchanged; raises cv.Invalid when pillow is
    missing or too old.
    """
    try:
        import PIL
    except ImportError as err:
        raise cv.Invalid(
            "Please install the pillow python package to use this feature. "
            "(pip install pillow)"
        ) from err

    # Compare the numeric major version. The previous check compared only the
    # first *character* of the version string, which wrongly rejected
    # Pillow >= 10 because "1" < "4" lexicographically.
    try:
        major = int(PIL.__version__.split(".")[0])
    except ValueError:
        # Unparseable version string: treat as too old and ask for an update.
        major = 0
    if major < 4:
        raise cv.Invalid(
            "Please update your pillow installation to at least 4.0.x. "
            "(pip install -U pillow)"
        )

    return value
def validate_truetype_file(value):
    """Validate that `value` names a .ttf file; returns the checked path."""
    if value.endswith(".zip"):  # for Google Fonts downloads
        raise cv.Invalid(
            f"Please unzip the font archive '{value}' first and then use the .ttf files inside."
        )
    if not value.endswith(".ttf"):
        raise cv.Invalid(
            "Only truetype (.ttf) files are supported. Please make sure you're "
            "using the correct format or rename the extension to .ttf"
        )
    return cv.file_(value)
def _compute_gfonts_local_path(value) -> Path:
    """Deterministic cache location for a downloaded Google Font variant."""
    # Key on family/weight/italic plus a version tag so format changes can
    # invalidate old cache entries.
    cache_key = f"{value[CONF_FAMILY]}@{value[CONF_WEIGHT]}@{value[CONF_ITALIC]}@v1"
    digest = hashlib.sha256(cache_key.encode()).hexdigest()
    return Path(CORE.config_dir) / ".esphome" / DOMAIN / digest[:8] / "font.ttf"
TYPE_LOCAL = "local"
TYPE_GFONTS = "gfonts"

# Schema for a truetype font file shipped alongside the configuration.
LOCAL_SCHEMA = cv.Schema(
    {
        cv.Required(CONF_PATH): validate_truetype_file,
    }
)

CONF_ITALIC = "italic"

# Accepted symbolic weight names mapped to numeric CSS font weights.
FONT_WEIGHTS = {
    "thin": 100,
    "extra-light": 200,
    "light": 300,
    "regular": 400,
    "medium": 500,
    "semi-bold": 600,
    "bold": 700,
    "extra-bold": 800,
    "black": 900,
}
def validate_weight_name(value):
    """Map a symbolic weight name (e.g. "semi-bold") to its numeric value."""
    name = cv.one_of(*FONT_WEIGHTS, lower=True, space="-")(value)
    return FONT_WEIGHTS[name]
def download_gfonts(value):
    """Ensure the Google Fonts .ttf for this config is cached locally.

    Fetches the CSS2 stylesheet for the requested family/weight/italic,
    extracts the embedded truetype URL, downloads it into the local cache
    path, and returns `value` unchanged. Raises cv.Invalid on any failure.
    """
    wght = value[CONF_WEIGHT]
    if value[CONF_ITALIC]:
        wght = f"1,{wght}"  # CSS2 API: italic axis value "1," prefixes the weight
    name = f"{value[CONF_FAMILY]}@{value[CONF_WEIGHT]}"
    url = f"https://fonts.googleapis.com/css2?family={value[CONF_FAMILY]}:wght@{wght}"
    path = _compute_gfonts_local_path(value)
    if path.is_file():
        # Already cached from a previous run; skip all network access.
        return value
    try:
        req = requests.get(url)
        req.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise cv.Invalid(
            f"Could not download font for {name}, please check the fonts exists "
            f"at google fonts ({e})"
        )
    # The CSS response embeds the URL of the actual .ttf file.
    match = re.search(r"src:\s+url\((.+)\)\s+format\('truetype'\);", req.text)
    if match is None:
        raise cv.Invalid(
            f"Could not extract ttf file from gfonts response for {name}, "
            f"please report this."
        )

    ttf_url = match.group(1)
    try:
        req = requests.get(ttf_url)
        req.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise cv.Invalid(f"Could not download ttf file for {name} ({ttf_url}): {e}")

    path.parent.mkdir(exist_ok=True, parents=True)
    path.write_bytes(req.content)
    return value
# Schema for fonts fetched from Google Fonts; validating it triggers the
# download (download_gfonts) so the file is cached before code generation.
GFONTS_SCHEMA = cv.All(
    {
        cv.Required(CONF_FAMILY): cv.string_strict,
        cv.Optional(CONF_WEIGHT, default="regular"): cv.Any(
            cv.int_, validate_weight_name
        ),
        cv.Optional(CONF_ITALIC, default=False): cv.boolean,
    },
    download_gfonts,
)
def validate_file_shorthand(value):
    """Expand the string shorthand — "gfonts://Family[@weight]" or a local
    path — into the explicit typed mapping and validate it."""
    value = cv.string_strict(value)
    if not value.startswith("gfonts://"):
        # Anything else is treated as a local font file path.
        return FILE_SCHEMA(
            {
                CONF_TYPE: TYPE_LOCAL,
                CONF_PATH: value,
            }
        )
    match = re.match(r"^gfonts://([^@]+)(@.+)?$", value)
    if match is None:
        raise cv.Invalid("Could not parse gfonts shorthand syntax, please check it")
    data = {
        CONF_TYPE: TYPE_GFONTS,
        CONF_FAMILY: match.group(1),
    }
    weight = match.group(2)
    if weight is not None:
        data[CONF_WEIGHT] = weight[1:]  # drop the leading "@"
    return FILE_SCHEMA(data)
# Dispatch on the `type:` key to the matching per-source file schema.
TYPED_FILE_SCHEMA = cv.typed_schema(
    {
        TYPE_LOCAL: LOCAL_SCHEMA,
        TYPE_GFONTS: GFONTS_SCHEMA,
    }
)
def _file_schema(value):
    """Accept either the string shorthand or an explicit typed mapping."""
    if isinstance(value, str):
        return validate_file_shorthand(value)
    return TYPED_FILE_SCHEMA(value)
FILE_SCHEMA = cv.Schema(_file_schema)

# Glyphs rendered when the config does not list any explicitly.
DEFAULT_GLYPHS = (
    ' !"%()+=,-.:/0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz°'
)
CONF_RAW_GLYPH_ID = "raw_glyph_id"

# Top-level schema for one `font:` entry.
FONT_SCHEMA = cv.Schema(
    {
        cv.Required(CONF_ID): cv.declare_id(Font),
        cv.Required(CONF_FILE): FILE_SCHEMA,
        cv.Optional(CONF_GLYPHS, default=DEFAULT_GLYPHS): validate_glyphs,
        cv.Optional(CONF_SIZE, default=20): cv.int_range(min=1),
        cv.GenerateID(CONF_RAW_DATA_ID): cv.declare_id(cg.uint8),
        cv.GenerateID(CONF_RAW_GLYPH_ID): cv.declare_id(GlyphData),
    }
)

CONFIG_SCHEMA = cv.All(validate_pillow_installed, FONT_SCHEMA)
async def to_code(config):
    """Rasterize the configured font and emit the C++ glyph bitmap data."""
    from PIL import ImageFont

    conf = config[CONF_FILE]
    if conf[CONF_TYPE] == TYPE_LOCAL:
        path = CORE.relative_config_path(conf[CONF_PATH])
    elif conf[CONF_TYPE] == TYPE_GFONTS:
        # Already downloaded into the cache during config validation.
        path = _compute_gfonts_local_path(conf)
    try:
        font = ImageFont.truetype(str(path), config[CONF_SIZE])
    except Exception as e:
        raise core.EsphomeError(f"Could not load truetype file {path}: {e}")

    ascent, descent = font.getmetrics()

    glyph_args = {}
    data = []
    for glyph in config[CONF_GLYPHS]:
        mask = font.getmask(glyph, mode="1")  # 1-bit rasterization
        _, (offset_x, offset_y) = font.font.getsize(glyph)
        width, height = mask.size
        # Pad each row to a whole number of bytes.
        width8 = ((width + 7) // 8) * 8
        glyph_data = [0] * (height * width8 // 8)
        for y in range(height):
            for x in range(width):
                if not mask.getpixel((x, y)):
                    continue
                pos = x + y * width8
                # MSB-first bit packing within each byte.
                glyph_data[pos // 8] |= 0x80 >> (pos % 8)
        # Record (offset into data, x/y bearing, width, height) per glyph.
        glyph_args[glyph] = (len(data), offset_x, offset_y, width, height)
        data += glyph_data

    rhs = [HexInt(x) for x in data]
    prog_arr = cg.progmem_array(config[CONF_RAW_DATA_ID], rhs)

    glyph_initializer = []
    for glyph in config[CONF_GLYPHS]:
        glyph_initializer.append(
            cg.StructInitializer(
                GlyphData,
                ("a_char", glyph),
                (
                    "data",
                    cg.RawExpression(f"{str(prog_arr)} + {str(glyph_args[glyph][0])}"),
                ),
                ("offset_x", glyph_args[glyph][1]),
                ("offset_y", glyph_args[glyph][2]),
                ("width", glyph_args[glyph][3]),
                ("height", glyph_args[glyph][4]),
            )
        )

    glyphs = cg.static_const_array(config[CONF_RAW_GLYPH_ID], glyph_initializer)

    cg.new_Pvariable(
        config[CONF_ID], glyphs, len(glyph_initializer), ascent, ascent + descent
    )
| 27.955017 | 96 | 0.605768 |
8d3fb17c426d3bc47d1c94199bfea3de5c646a0e | 1,242 | py | Python | tolerant/response.py | christiemj09/tolerant | c936c9879a2220ea798a5a51736da6283364bb1f | [
"MIT"
] | null | null | null | tolerant/response.py | christiemj09/tolerant | c936c9879a2220ea798a5a51736da6283364bb1f | [
"MIT"
] | null | null | null | tolerant/response.py | christiemj09/tolerant | c936c9879a2220ea798a5a51736da6283364bb1f | [
"MIT"
] | null | null | null | """
Wrap functions in error-tolerant response objects.
"""
class Response(object):
"""A response from a function."""
def __init__(self, val, func, error=None):
self.val = val
self.func = func
self.error = error
def __call__(self):
return self.val
def response(func):
"""Wrap the evaluation of a function in a Response object."""
def decorator(*args, **kwargs):
try:
resp = Response(func(*args, **kwargs), func)
except Exception as e:
resp = Response(None, func, error=e)
return resp
return decorator
class LazyResponse(object):
"""A lazily evaluated response from a function."""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self.error = None
def __call__(self):
try:
return self.func(*self.args, **self.kwargs)
except Exception as e:
self.error = e
def lazy_response(func):
"""Wrap the evaluation of a function in a LazyResponse object."""
def decorator(*args, **kwargs):
return LazyResponse(func, *args, **kwargs)
return decorator
| 22.581818 | 69 | 0.580515 |
5d2c409ac445aaa8b25322946247a2d1292f80ae | 6,474 | py | Python | sdk/python/pulumi_aws/directoryservice/log_service.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/directoryservice/log_service.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/directoryservice/log_service.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['LogService']
class LogService(pulumi.CustomResource):
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             directory_id: Optional[pulumi.Input[str]] = None,
             log_group_name: Optional[pulumi.Input[str]] = None,
             __props__=None,
             __name__=None,
             __opts__=None):
    """
    Provides a Log subscription for AWS Directory Service that pushes logs to cloudwatch.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example_log_group = aws.cloudwatch.LogGroup("exampleLogGroup", retention_in_days=14)
    ad_log_policy_policy_document = example_log_group.arn.apply(lambda arn: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
        actions=[
            "logs:CreateLogStream",
            "logs:PutLogEvents",
        ],
        principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
            identifiers=["ds.amazonaws.com"],
            type="Service",
        )],
        resources=[f"{arn}:*"],
        effect="Allow",
    )]))
    ad_log_policy_log_resource_policy = aws.cloudwatch.LogResourcePolicy("ad-log-policyLogResourcePolicy",
        policy_document=ad_log_policy_policy_document.json,
        policy_name="ad-log-policy")
    example_log_service = aws.directoryservice.LogService("exampleLogService",
        directory_id=aws_directory_service_directory["example"]["id"],
        log_group_name=example_log_group.name)
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] directory_id: The id of directory.
    :param pulumi.Input[str] log_group_name: Name of the cloudwatch log group to which the logs should be published. The log group should be already created and the directory service principal should be provided with required permission to create stream and publish logs. Changing this value would delete the current subscription and create a new one. A directory can only have one log subscription at a time.
    """
    # Legacy positional aliases (__name__/__opts__) predate the current
    # resource-options API; honored but deprecated.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource: validate and collect the input properties.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()

        if directory_id is None:
            raise TypeError("Missing required property 'directory_id'")
        __props__['directory_id'] = directory_id
        if log_group_name is None:
            raise TypeError("Missing required property 'log_group_name'")
        __props__['log_group_name'] = log_group_name
    super(LogService, __self__).__init__(
        'aws:directoryservice/logService:LogService',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        directory_id: Optional[pulumi.Input[str]] = None,
        log_group_name: Optional[pulumi.Input[str]] = None) -> 'LogService':
    """
    Get an existing LogService resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] directory_id: The id of directory.
    :param pulumi.Input[str] log_group_name: Name of the cloudwatch log group to which the logs should be published. The log group should be already created and the directory service principal should be provided with required permission to create stream and publish logs. Changing this value would delete the current subscription and create a new one. A directory can only have one log subscription at a time.
    """
    # Rehydrate the resource from provider state instead of creating it:
    # passing an id in the options makes the engine look it up.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = {
        "directory_id": directory_id,
        "log_group_name": log_group_name,
    }
    return LogService(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="directoryId")
    def directory_id(self) -> pulumi.Output[str]:
        """
        The id of the directory this log subscription belongs to.
        """
        # Mirrors the provider-side "directoryId" output attribute.
        return pulumi.get(self, "directory_id")
    @property
    @pulumi.getter(name="logGroupName")
    def log_group_name(self) -> pulumi.Output[str]:
        """
        Name of the cloudwatch log group to which the logs should be published. The log group should be already created and the directory service principal should be provided with required permission to create stream and publish logs. Changing this value would delete the current subscription and create a new one. A directory can only have one log subscription at a time.
        """
        # Mirrors the provider-side "logGroupName" output attribute.
        return pulumi.get(self, "log_group_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 48.676692 | 413 | 0.669293 |
d04c82cfdb34772c0e539ed0430fe05082ed7d44 | 155 | py | Python | customer_app/apps.py | AtakanAytar/Django-Restaurant-app | 30d7e1e4ceaec049858a4199d86783aa214edc16 | [
"MIT"
] | null | null | null | customer_app/apps.py | AtakanAytar/Django-Restaurant-app | 30d7e1e4ceaec049858a4199d86783aa214edc16 | [
"MIT"
] | null | null | null | customer_app/apps.py | AtakanAytar/Django-Restaurant-app | 30d7e1e4ceaec049858a4199d86783aa214edc16 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CustomerAppConfig(AppConfig):
    """Django application configuration for the ``customer_app`` package."""
    # Default primary-key field type for models declared in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted path Django uses to locate and register the application.
    name = 'customer_app'
| 22.142857 | 56 | 0.774194 |
9cd53c2ed0d9757731b0b514f20ea52398c81a6a | 14,284 | py | Python | official/cv/yolov5/postprocess.py | polar-region/MindSpore | b96bf8e175faabe2521882c0b7f6e89928e267c7 | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | official/cv/yolov5/postprocess.py | Li-kewei/models | d02ba6a87c37ad9d0bc413413b9e9ddc8c60f43c | [
"Apache-2.0"
] | null | null | null | official/cv/yolov5/postprocess.py | Li-kewei/models | d02ba6a87c37ad9d0bc413413b9e9ddc8c60f43c | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YoloV5 310 infer."""
import os
import sys
import argparse
import datetime
import time
import ast
from collections import defaultdict
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from src.logger import get_logger
# Command-line interface for the Ascend-310 post-processing step.  Note
# parse_known_args() below deliberately ignores unrecognized flags so this
# script can share a launch command line with other tools.
parser = argparse.ArgumentParser('yolov5 postprocess')
# dataset related
parser.add_argument('--per_batch_size', default=1, type=int, help='batch size for per gpu')
# logging related
parser.add_argument('--log_path', type=str, default='outputs/', help='checkpoint save location')
# detect_related
parser.add_argument('--nms_thresh', type=float, default=0.6, help='threshold for NMS')
parser.add_argument('--ann_file', type=str, default='', help='path to annotation')
parser.add_argument('--ignore_threshold', type=float, default=0.001, help='threshold to throw low quality boxes')
parser.add_argument('--dataset_path', type=str, default='', help='path of image dataset')
parser.add_argument('--result_files', type=str, default='./result_Files', help='path to 310 infer result path')
# ast.literal_eval parses "True"/"False" strings into real booleans.
parser.add_argument('--multi_label', type=ast.literal_eval, default=True, help='whether to use multi label')
parser.add_argument('--multi_label_thresh', type=float, default=0.1, help='threshold to throw low quality boxes')
args, _ = parser.parse_known_args()
class Redirct:
    """Minimal file-like object that accumulates everything written to it.

    Used as a temporary stand-in for sys.stdout so printed output (e.g. from
    COCOeval.summarize) can be captured as a string.
    """

    def __init__(self):
        self.content = ""

    def write(self, content):
        """Append *content* to the captured buffer."""
        self.content = self.content + content

    def flush(self):
        """Discard everything captured so far."""
        self.content = ""
class DetectionEngine:
    """Detection engine.

    Accumulates raw YOLOv5 head outputs per image (``detect``), applies
    per-class DIoU-NMS (``do_nms_for_results``), dumps the surviving boxes to
    a COCO-format JSON file (``write_result``) and scores them with
    pycocotools (``get_eval_result``).
    """
    def __init__(self, args_detection):
        # Minimum (class-)confidence for a candidate box to be kept at all.
        self.ignore_threshold = args_detection.ignore_threshold
        self.labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
                       'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
                       'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
                       'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                       'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                       'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                       'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                       'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                       'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
                       'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
        self.num_classes = len(self.labels)
        # img_id -> {coco_category_id: [[x, y, w, h, conf], ...]}
        self.results = {}
        self.file_path = ''
        self.save_prefix = args_detection.outputs_dir
        self.ann_file = args_detection.ann_file
        self._coco = COCO(self.ann_file)
        self._img_ids = list(sorted(self._coco.imgs.keys()))
        self.det_boxes = []
        self.nms_thresh = args_detection.nms_thresh
        self.multi_label = args_detection.multi_label
        self.multi_label_thresh = args_detection.multi_label_thresh
        # Maps the contiguous class index (0..79) to the sparse COCO category id.
        self.coco_catIds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27,
                            28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53,
                            54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80,
                            81, 82, 84, 85, 86, 87, 88, 89, 90]
    def do_nms_for_results(self):
        """Run per-image, per-class DIoU-NMS and collect the surviving boxes."""
        for image_id in self.results:
            for clsi in self.results[image_id]:
                dets = self.results[image_id][clsi]
                dets = np.array(dets)
                keep_index = self._diou_nms(dets, thresh=self.nms_thresh)
                keep_box = [{'image_id': int(image_id), 'category_id': int(clsi),
                             'bbox': list(dets[i][:4].astype(float)),
                             'score': dets[i][4].astype(float)} for i in keep_index]
                self.det_boxes.extend(keep_box)
    def _nms(self, predicts, threshold):
        """Plain IoU NMS over rows of [x, y, w, h, score]; return kept row indices."""
        # convert xywh -> xmin ymin xmax ymax
        x1 = predicts[:, 0]
        y1 = predicts[:, 1]
        x2 = x1 + predicts[:, 2]
        y2 = y1 + predicts[:, 3]
        scores = predicts[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        reserved_boxes = []
        while order.size > 0:
            i = order[0]
            reserved_boxes.append(i)
            max_x1 = np.maximum(x1[i], x1[order[1:]])
            max_y1 = np.maximum(y1[i], y1[order[1:]])
            min_x2 = np.minimum(x2[i], x2[order[1:]])
            min_y2 = np.minimum(y2[i], y2[order[1:]])
            intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)
            intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)
            intersect_area = intersect_w * intersect_h
            ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
            indexes = np.where(ovr <= threshold)[0]
            order = order[indexes + 1]
        return reserved_boxes
    def _diou_nms(self, dets, thresh=0.5):
        """Distance-IoU NMS (IoU penalized by normalized center distance).

        Rows of *dets* are [x, y, w, h, score]; returns kept row indices.
        """
        # convert xywh -> xmin ymin xmax ymax
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = x1 + dets[:, 2]
        y2 = y1 + dets[:, 3]
        scores = dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # Squared distance between box centers ...
            center_x1 = (x1[i] + x2[i]) / 2
            center_x2 = (x1[order[1:]] + x2[order[1:]]) / 2
            center_y1 = (y1[i] + y2[i]) / 2
            center_y2 = (y1[order[1:]] + y2[order[1:]]) / 2
            inter_diag = (center_x2 - center_x1) ** 2 + (center_y2 - center_y1) ** 2
            # ... normalized by the diagonal of the smallest enclosing box.
            out_max_x = np.maximum(x2[i], x2[order[1:]])
            out_max_y = np.maximum(y2[i], y2[order[1:]])
            out_min_x = np.minimum(x1[i], x1[order[1:]])
            out_min_y = np.minimum(y1[i], y1[order[1:]])
            outer_diag = (out_max_x - out_min_x) ** 2 + (out_max_y - out_min_y) ** 2
            diou = ovr - inter_diag / outer_diag
            diou = np.clip(diou, -1, 1)
            inds = np.where(diou <= thresh)[0]
            order = order[inds + 1]
        return keep
    def write_result(self):
        """Dump accumulated detections to a timestamped JSON file; return its path."""
        import json
        t = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S')
        self.file_path = self.save_prefix + '/predict' + t + '.json'
        try:
            # Use a context manager so the file is closed even if json.dump fails.
            with open(self.file_path, 'w') as f:
                json.dump(self.det_boxes, f)
        except IOError as e:
            raise RuntimeError("Unable to open json file to dump. What(): {}".format(str(e))) from e
        return self.file_path
    def get_eval_result(self):
        """Score the dumped predictions with pycocotools and return the summary text."""
        coco_gt = COCO(self.ann_file)
        coco_dt = coco_gt.loadRes(self.file_path)
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        # COCOeval.summarize() prints to stdout; capture it via a redirect.
        rdct = Redirct()
        stdout = sys.stdout
        sys.stdout = rdct
        coco_eval.summarize()
        sys.stdout = stdout
        return rdct.content
    def detect(self, outputs, batch, img_shape, image_id):
        """Convert raw head outputs into per-image, per-class candidate boxes.

        *outputs* is a list of per-head arrays shaped [batch, gy, gx, anchors,
        5 + num_classes], with normalized xywh in channels 0-3, objectness at
        channel 4 and class scores after that.  Candidates accumulate in
        ``self.results`` keyed by image id and COCO category id.
        """
        outputs_num = len(outputs)
        # output [|32, 52, 52, 3, 85| ]
        for batch_id in range(batch):
            for out_id in range(outputs_num):
                # 32, 52, 52, 3, 85
                out_item = outputs[out_id]
                # 52, 52, 3, 85
                out_item_single = out_item[batch_id, :]
                ori_w, ori_h = img_shape[batch_id]
                img_id = int(image_id[batch_id])
                # Scale normalized coordinates back to the original image size.
                x = out_item_single[..., 0] * ori_w
                y = out_item_single[..., 1] * ori_h
                w = out_item_single[..., 2] * ori_w
                h = out_item_single[..., 3] * ori_h
                conf = out_item_single[..., 4:5]
                cls_emb = out_item_single[..., 5:]
                x = x.reshape(-1)
                y = y.reshape(-1)
                w = w.reshape(-1)
                h = h.reshape(-1)
                # COCO boxes are [x_top_left, y_top_left, width, height].
                x_top_left = x - w / 2.
                y_top_left = y - h / 2.
                cls_emb = cls_emb.reshape(-1, self.num_classes)
                if self.multi_label:
                    conf = conf.reshape(-1, 1)
                    confidence = cls_emb * conf
                    # Keep every (box, class) pair whose raw class score clears
                    # the multi-label threshold.
                    flag = cls_emb > self.multi_label_thresh
                    flag = flag.nonzero()
                    for index in range(len(flag[0])):
                        i = flag[0][index]
                        j = flag[1][index]
                        confi = confidence[i][j]
                        if confi < self.ignore_threshold:
                            continue
                        if img_id not in self.results:
                            self.results[img_id] = defaultdict(list)
                        x_lefti = max(0, x_top_left[i])
                        y_lefti = max(0, y_top_left[i])
                        wi = min(w[i], ori_w)
                        hi = min(h[i], ori_h)
                        clsi = j
                        # transform catId to match coco
                        coco_clsi = self.coco_catIds[clsi]
                        self.results[img_id][coco_clsi].append([x_lefti, y_lefti, wi, hi, confi])
                else:
                    # Single-label: keep only each box's argmax class.
                    conf = conf.reshape(-1)
                    cls_argmax = np.argmax(cls_emb, axis=-1).reshape(-1)
                    # Boolean mask selecting exactly the argmax entry per row
                    # (replaces the obscure `np.random.random(...) > sys.maxsize`
                    # trick, which built the same all-False array wastefully).
                    flag = np.zeros(cls_emb.shape, dtype=bool)
                    for i in range(flag.shape[0]):
                        c = cls_argmax[i]
                        flag[i, c] = True
                    confidence = cls_emb[flag] * conf
                    for x_lefti, y_lefti, wi, hi, confi, clsi in zip(x_top_left, y_top_left, w, h, confidence,
                                                                     cls_argmax):
                        if confi < self.ignore_threshold:
                            continue
                        if img_id not in self.results:
                            self.results[img_id] = defaultdict(list)
                        x_lefti = max(0, x_lefti)
                        y_lefti = max(0, y_lefti)
                        wi = min(wi, ori_w)
                        hi = min(hi, ori_h)
                        # transform catId to match coco
                        # BUG FIX: was `self.coco_catids` (lowercase), which does
                        # not exist -- __init__ defines `coco_catIds`, so this
                        # branch always raised AttributeError.
                        coco_clsi = self.coco_catIds[clsi]
                        self.results[img_id][coco_clsi].append([x_lefti, y_lefti, wi, hi, confi])
if __name__ == "__main__":
start_time = time.time()
args.outputs_dir = os.path.join(args.log_path,
datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
args.logger = get_logger(args.outputs_dir, 0)
# init detection engine
detection = DetectionEngine(args)
coco = COCO(args.ann_file)
result_path = args.result_files
files = os.listdir(args.dataset_path)
for file in files:
img_ids_name = file.split('.')[0]
img_id_ = int(np.squeeze(img_ids_name))
imgIds = coco.getImgIds(imgIds=[img_id_])
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
image_shape = ((img['width'], img['height']),)
img_id_ = (np.squeeze(img_ids_name),)
result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")
result_path_2 = os.path.join(result_path, img_ids_name + "_2.bin")
output_small = np.fromfile(result_path_0, dtype=np.float32).reshape(1, 20, 20, 3, 85)
output_me = np.fromfile(result_path_1, dtype=np.float32).reshape(1, 40, 40, 3, 85)
output_big = np.fromfile(result_path_2, dtype=np.float32).reshape(1, 80, 80, 3, 85)
detection.detect([output_small, output_me, output_big], args.per_batch_size, image_shape, img_id_)
args.logger.info('Calculating mAP...')
detection.do_nms_for_results()
result_file_path = detection.write_result()
args.logger.info('result file path: {}'.format(result_file_path))
eval_result = detection.get_eval_result()
cost_time = time.time() - start_time
args.logger.info('\n=============coco 310 infer reulst=========\n' + eval_result)
args.logger.info('testing cost time {:.2f}h'.format(cost_time / 3600.))
| 43.950769 | 119 | 0.536754 |
f59d243d07a25e7dcd5e7992c28bf6c35a54fa45 | 1,968 | py | Python | migu.py | squ33ker/Dlink_Parse | b8ea35e64e480720fff5f466c3959e631b379abf | [
"MIT"
] | 1 | 2021-07-06T17:16:42.000Z | 2021-07-06T17:16:42.000Z | migu.py | squ33ker/Dlink_Parse | b8ea35e64e480720fff5f466c3959e631b379abf | [
"MIT"
] | null | null | null | migu.py | squ33ker/Dlink_Parse | b8ea35e64e480720fff5f466c3959e631b379abf | [
"MIT"
] | 1 | 2021-08-05T03:09:07.000Z | 2021-08-05T03:09:07.000Z | import requests
class migu:
    """Resolve a playable MiGu Video stream URL.

    The page URL's ``cid`` query parameter identifies the content; the class
    asks the playurl gateway for a raw URL and appends the ``ddCalcu``
    signature the CDN expects.
    """
    def __init__(self, url):
        self.url = url
        self.cid = self.url.split("cid=")[-1]
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36¬"
        }
    def get_params(self):
        """Query parameters for the playurl gateway request."""
        return {
            "contId": self.cid,
        }
    def str_cover_list(self, str):
        """Split a string into a list of its characters."""
        return [ch for ch in str]
    def get_ddCalcu(self, puData_url):
        """Derive the ``ddCalcu`` signature from *puData_url*'s query string.

        The signature interleaves characters taken alternately from the end and
        the start of ``puData``, splicing in single characters from
        ``timestamp``, ``ProgramID`` and ``Channel_ID`` at fixed positions.
        """
        query = puData_url.split("?")[-1]
        params = {pair.split("=")[0]: pair.split("=")[1] for pair in query.split("&")}
        pu_chars = self.str_cover_list(params['puData'])
        size = len(pu_chars)
        pieces = []
        # range((size + 1) // 2) iterates exactly while 2 * p < size.
        for p in range((size + 1) // 2):
            pieces.append(pu_chars[size - p - 1])
            if p < size - p - 1:
                pieces.append(params['puData'][p])
            if p == 1:
                pieces.append('e')
            if p == 2:
                pieces.append(self.str_cover_list(params['timestamp'])[6])
            if p == 3:
                pieces.append(self.str_cover_list(params['ProgramID'])[2])
            if p == 4:
                channel_chars = self.str_cover_list(params['Channel_ID'])
                pieces.append(channel_chars[len(channel_chars) - 4])
        return ''.join(pieces)
    def calc_url(self, url):
        """Return *url* with the computed ddCalcu signature appended."""
        return f"{url}&ddCalcu={self.get_ddCalcu(url)}"
    def start(self):
        """Fetch the gateway play URL, sign it and print the playable URL."""
        response = requests.get("https://webapi.miguvideo.com/gateway/playurl/v3/play/playurl",
                                params=self.get_params(),
                                headers=self.headers)
        puData_url = response.json()['body']['urlInfo']['url']
        print(self.calc_url(puData_url))
if __name__ == '__main__':
    # BUG FIX: migu() was called with no arguments, but migu.__init__ requires
    # the page URL, so the script always crashed with a TypeError before doing
    # any work.  Take the URL from the command line instead.
    import sys
    if len(sys.argv) > 1:
        migu(sys.argv[1]).start()
    else:
        print("usage: python migu.py <miguvideo url containing 'cid='>")
| 33.931034 | 148 | 0.537093 |
36f00c2c37ee193ceadf038ad4449be39e5e6828 | 395 | py | Python | login_register/v1/models.py | General-ITer/Django-Introduction | e88b12682f9abc46a90a0fc79e7443537230a506 | [
"Apache-2.0"
] | null | null | null | login_register/v1/models.py | General-ITer/Django-Introduction | e88b12682f9abc46a90a0fc79e7443537230a506 | [
"Apache-2.0"
] | 1 | 2020-12-09T18:26:36.000Z | 2020-12-09T18:26:36.000Z | login_register/v1/models.py | General-ITer/Django-Introduction | e88b12682f9abc46a90a0fc79e7443537230a506 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import User, AbstractUser
from django.db import models
# Create your models here.
class MyUser(AbstractUser):
    """Custom user model extending Django's AbstractUser.

    Adds a unique phone number, makes new accounts inactive by default, and
    enforces a unique e-mail address.
    """
    # Unique mobile number (verbose_name is the Chinese label shown in admin).
    phone = models.CharField(
        max_length=20,
        verbose_name='手机号',
        unique=True
    )
    # Accounts start inactive -- presumably until verification succeeds;
    # TODO confirm the activation flow.
    is_active = models.BooleanField(
        default=False
    )
    # NOTE(review): AbstractUser already defines ``email``; this override makes
    # it unique and limits it to 50 characters.
    # BUG FIX: the original closing line carried fused non-Python residue
    # ("| 20.789474 | 56 | 0.643038 |"), which made the module unparseable.
    email = models.CharField(
        unique=True,
        max_length=50
    )
7f3f3fffb5d2d2052d2abea522a3b5bdb167af39 | 53,799 | py | Python | .buildozer/android/platform/build-armeabi-v7a/build/other_builds/python3-libffi-openssl-sqlite3/armeabi-v7a__ndk_target_21/python3/Lib/test/test_warnings/__init__.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 3 | 2019-06-25T22:14:51.000Z | 2021-07-31T23:09:42.000Z | .buildozer/android/platform/build-armeabi-v7a/build/other_builds/python3-libffi-openssl-sqlite3/armeabi-v7a__ndk_target_21/python3/Lib/test/test_warnings/__init__.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 6 | 2020-01-31T18:04:48.000Z | 2021-06-05T10:53:55.000Z | .buildozer/android/platform/build-armeabi-v7a/build/other_builds/python3-libffi-openssl-sqlite3/armeabi-v7a__ndk_target_21/python3/Lib/test/test_warnings/__init__.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 1 | 2019-09-30T23:47:05.000Z | 2019-09-30T23:47:05.000Z | from contextlib import contextmanager
import linecache
import os
from io import StringIO
import re
import sys
import textwrap
import unittest
from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.test_warnings.data import stacklevel as warning_tests
import warnings as original_warnings
# Two independent copies of the warnings machinery: one forced onto the pure
# Python implementation, one backed by the C-accelerated _warnings module.
py_warnings = support.import_fresh_module('warnings', blocked=['_warnings'])
c_warnings = support.import_fresh_module('warnings', fresh=['_warnings'])
# True only on --with-pydebug builds (sys.gettotalrefcount exists there).
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
@contextmanager
def warnings_state(module):
    """Use a specific warnings implementation in warning_tests."""
    global __warningregistry__
    # Clear every per-module __warningregistry__ so "once"-style filters behave
    # as if no warning had ever been issued.
    for to_clear in (sys, warning_tests):
        try:
            to_clear.__warningregistry__.clear()
        except AttributeError:
            pass
    try:
        __warningregistry__.clear()
    except NameError:
        pass
    original_warnings = warning_tests.warnings
    original_filters = module.filters
    try:
        # Work on a copy of the filter list so the caller's filters are
        # restored untouched on exit.
        module.filters = original_filters[:]
        module.simplefilter("once")
        warning_tests.warnings = module
        yield
    finally:
        # Always restore the helper module's warnings implementation and the
        # original filter list, even if the body raised.
        warning_tests.warnings = original_warnings
        module.filters = original_filters
class BaseTest:
    """Basic bookkeeping required for testing."""
    def setUp(self):
        # Remember unittest's own reference to the warnings module so it can
        # be restored in tearDown.
        self.old_unittest_module = unittest.case.warnings
        # The __warningregistry__ needs to be in a pristine state for tests
        # to work properly.
        if '__warningregistry__' in globals():
            del globals()['__warningregistry__']
        if hasattr(warning_tests, '__warningregistry__'):
            del warning_tests.__warningregistry__
        if hasattr(sys, '__warningregistry__'):
            del sys.__warningregistry__
        # The 'warnings' module must be explicitly set so that the proper
        # interaction between _warnings and 'warnings' can be controlled.
        sys.modules['warnings'] = self.module
        # Ensure that unittest.TestCase.assertWarns() uses the same warnings
        # module than warnings.catch_warnings(). Otherwise,
        # warnings.catch_warnings() will be unable to remove the added filter.
        unittest.case.warnings = self.module
        super(BaseTest, self).setUp()
    def tearDown(self):
        # Undo both substitutions made in setUp.
        sys.modules['warnings'] = original_warnings
        unittest.case.warnings = self.old_unittest_module
        super(BaseTest, self).tearDown()
class PublicAPITests(BaseTest):
    """Check that the warnings module exposes exactly the documented
    public API.
    """
    def test_module_all_attribute(self):
        self.assertTrue(hasattr(self.module, '__all__'))
        expected_names = {"warn", "warn_explicit", "showwarning",
                          "formatwarning", "filterwarnings", "simplefilter",
                          "resetwarnings", "catch_warnings"}
        self.assertSetEqual(set(self.module.__all__), expected_names)
class CPublicAPITests(PublicAPITests, unittest.TestCase):
    # Run the public-API checks against the C-accelerated implementation.
    module = c_warnings
class PyPublicAPITests(PublicAPITests, unittest.TestCase):
    # Run the public-API checks against the pure-Python implementation.
    module = py_warnings
class FilterTests(BaseTest):
    """Testing the filtering functionality."""
    def test_error(self):
        # "error" turns a matching warning into a raised exception.
        with original_warnings.catch_warnings(module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("error", category=UserWarning)
            self.assertRaises(UserWarning, self.module.warn,
                                "FilterTests.test_error")
    def test_error_after_default(self):
        with original_warnings.catch_warnings(module=self.module) as w:
            self.module.resetwarnings()
            message = "FilterTests.test_ignore_after_default"
            def f():
                self.module.warn(message, UserWarning)
            with support.captured_stderr() as stderr:
                f()
            stderr = stderr.getvalue()
            self.assertIn("UserWarning: FilterTests.test_ignore_after_default",
                stderr)
            self.assertIn("self.module.warn(message, UserWarning)",
                stderr)
            # Adding an "error" filter afterwards must override the default
            # action for subsequent warnings.
            self.module.filterwarnings("error", category=UserWarning)
            self.assertRaises(UserWarning, f)
    def test_ignore(self):
        # "ignore" suppresses the warning; only the registry 'version' key
        # should exist afterwards.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("ignore", category=UserWarning)
            self.module.warn("FilterTests.test_ignore", UserWarning)
            self.assertEqual(len(w), 0)
            self.assertEqual(list(__warningregistry__), ['version'])
    def test_ignore_after_default(self):
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            message = "FilterTests.test_ignore_after_default"
            def f():
                self.module.warn(message, UserWarning)
            f()
            self.module.filterwarnings("ignore", category=UserWarning)
            f()
            f()
            self.assertEqual(len(w), 1)
    def test_always(self):
        # "always" reports every occurrence, even repeats of the same message.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("always", category=UserWarning)
            message = "FilterTests.test_always"
            def f():
                self.module.warn(message, UserWarning)
            f()
            self.assertEqual(len(w), 1)
            self.assertEqual(w[-1].message.args[0], message)
            f()
            self.assertEqual(len(w), 2)
            self.assertEqual(w[-1].message.args[0], message)
    def test_always_after_default(self):
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            message = "FilterTests.test_always_after_ignore"
            def f():
                self.module.warn(message, UserWarning)
            f()
            self.assertEqual(len(w), 1)
            self.assertEqual(w[-1].message.args[0], message)
            f()
            self.assertEqual(len(w), 1)
            self.module.filterwarnings("always", category=UserWarning)
            f()
            self.assertEqual(len(w), 2)
            self.assertEqual(w[-1].message.args[0], message)
            f()
            self.assertEqual(len(w), 3)
            self.assertEqual(w[-1].message.args[0], message)
    def test_default(self):
        # "default" reports the first occurrence per location only.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("default", category=UserWarning)
            message = UserWarning("FilterTests.test_default")
            for x in range(2):
                self.module.warn(message, UserWarning)
                if x == 0:
                    self.assertEqual(w[-1].message, message)
                    del w[:]
                elif x == 1:
                    self.assertEqual(len(w), 0)
                else:
                    raise ValueError("loop variant unhandled")
    def test_module(self):
        # "module" reports the first occurrence per module only.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("module", category=UserWarning)
            message = UserWarning("FilterTests.test_module")
            self.module.warn(message, UserWarning)
            self.assertEqual(w[-1].message, message)
            del w[:]
            self.module.warn(message, UserWarning)
            self.assertEqual(len(w), 0)
    def test_once(self):
        # "once" reports the first occurrence regardless of location.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("once", category=UserWarning)
            message = UserWarning("FilterTests.test_once")
            self.module.warn_explicit(message, UserWarning, "__init__.py",
                                    42)
            self.assertEqual(w[-1].message, message)
            del w[:]
            self.module.warn_explicit(message, UserWarning, "__init__.py",
                                    13)
            self.assertEqual(len(w), 0)
            self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
                                    42)
            self.assertEqual(len(w), 0)
    def test_module_globals(self):
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.simplefilter("always", UserWarning)
            # bpo-33509: module_globals=None must not crash
            self.module.warn_explicit('msg', UserWarning, "filename", 42,
                                      module_globals=None)
            self.assertEqual(len(w), 1)
            # Invalid module_globals type
            with self.assertRaises(TypeError):
                self.module.warn_explicit('msg', UserWarning, "filename", 42,
                                          module_globals=True)
            self.assertEqual(len(w), 1)
            # Empty module_globals
            self.module.warn_explicit('msg', UserWarning, "filename", 42,
                                      module_globals={})
            self.assertEqual(len(w), 2)
    def test_inheritance(self):
        # A filter on a base category must match subclass warnings too.
        with original_warnings.catch_warnings(module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("error", category=Warning)
            self.assertRaises(UserWarning, self.module.warn,
                                "FilterTests.test_inheritance", UserWarning)
    def test_ordering(self):
        # The first matching filter wins; a later "error" must not fire.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("ignore", category=UserWarning)
            self.module.filterwarnings("error", category=UserWarning,
                                        append=True)
            del w[:]
            try:
                self.module.warn("FilterTests.test_ordering", UserWarning)
            except UserWarning:
                self.fail("order handling for actions failed")
            self.assertEqual(len(w), 0)
    def test_filterwarnings(self):
        # Test filterwarnings().
        # Implicitly also tests resetwarnings().
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.filterwarnings("error", "", Warning, "", 0)
            self.assertRaises(UserWarning, self.module.warn, 'convert to error')
            self.module.resetwarnings()
            text = 'handle normally'
            self.module.warn(text)
            self.assertEqual(str(w[-1].message), text)
            self.assertIs(w[-1].category, UserWarning)
            self.module.filterwarnings("ignore", "", Warning, "", 0)
            text = 'filtered out'
            self.module.warn(text)
            self.assertNotEqual(str(w[-1].message), text)
            self.module.resetwarnings()
            self.module.filterwarnings("error", "hex*", Warning, "", 0)
            self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
            text = 'nonmatching text'
            self.module.warn(text)
            self.assertEqual(str(w[-1].message), text)
            self.assertIs(w[-1].category, UserWarning)
    def test_message_matching(self):
        # The message argument of filterwarnings() is a prefix regex.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.simplefilter("ignore", UserWarning)
            self.module.filterwarnings("error", "match", UserWarning)
            self.assertRaises(UserWarning, self.module.warn, "match")
            self.assertRaises(UserWarning, self.module.warn, "match prefix")
            self.module.warn("suffix match")
            self.assertEqual(w, [])
            self.module.warn("something completely different")
            self.assertEqual(w, [])
    def test_mutate_filter_list(self):
        # A match() callback that empties the filter list must not crash the
        # machinery mid-iteration.
        class X:
            def match(self, a):
                L[:] = []
        L = [("default",X(),UserWarning,X(),0) for i in range(2)]
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.filters = L
            self.module.warn_explicit(UserWarning("b"), None, "f.py", 42)
        self.assertEqual(str(w[-1].message), "b")
    def test_filterwarnings_duplicate_filters(self):
        with original_warnings.catch_warnings(module=self.module):
            self.module.resetwarnings()
            self.module.filterwarnings("error", category=UserWarning)
            self.assertEqual(len(self.module.filters), 1)
            self.module.filterwarnings("ignore", category=UserWarning)
            self.module.filterwarnings("error", category=UserWarning)
            self.assertEqual(
                len(self.module.filters), 2,
                "filterwarnings inserted duplicate filter"
            )
            self.assertEqual(
                self.module.filters[0][0], "error",
                "filterwarnings did not promote filter to "
                "the beginning of list"
            )
    def test_simplefilter_duplicate_filters(self):
        with original_warnings.catch_warnings(module=self.module):
            self.module.resetwarnings()
            self.module.simplefilter("error", category=UserWarning)
            self.assertEqual(len(self.module.filters), 1)
            self.module.simplefilter("ignore", category=UserWarning)
            self.module.simplefilter("error", category=UserWarning)
            self.assertEqual(
                len(self.module.filters), 2,
                "simplefilter inserted duplicate filter"
            )
            self.assertEqual(
                self.module.filters[0][0], "error",
                "simplefilter did not promote filter to the beginning of list"
            )
    def test_append_duplicate(self):
        with original_warnings.catch_warnings(module=self.module,
                record=True) as w:
            self.module.resetwarnings()
            self.module.simplefilter("ignore")
            self.module.simplefilter("error", append=True)
            self.module.simplefilter("ignore", append=True)
            self.module.warn("test_append_duplicate", category=UserWarning)
            self.assertEqual(len(self.module.filters), 2,
                "simplefilter inserted duplicate filter"
            )
            self.assertEqual(len(w), 0,
                "appended duplicate changed order of filters"
            )
class CFilterTests(FilterTests, unittest.TestCase):
    # Run the filter tests against the C-accelerated implementation.
    module = c_warnings
class PyFilterTests(FilterTests, unittest.TestCase):
    # Run the filter tests against the pure-Python implementation.
    module = py_warnings
class WarnTests(BaseTest):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertIs(w[-1].category, UserWarning)
# Issue 3639
def test_warn_nonstandard_types(self):
# warn() should handle non-standard types without issue.
for ob in (Warning, None, 42):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
self.module.warn(ob)
# Don't directly compare objects since
# ``Warning() != Warning()``.
self.assertEqual(str(w[-1].message), str(UserWarning(ob)))
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
    def test_stacklevel_import(self):
        # Issue #24305: With stacklevel=2, module-level warnings should work.
        # unload() forces the import below to re-execute the module body.
        support.unload('test.test_warnings.data.import_warning')
        with warnings_state(self.module):
            with original_warnings.catch_warnings(record=True,
                    module=self.module) as w:
                self.module.simplefilter('always')
                import test.test_warnings.data.import_warning
                self.assertEqual(len(w), 1)
                # stacklevel=2 attributes the warning to the importer (us).
                self.assertEqual(w[0].filename, __file__)
    def test_missing_filename_not_main(self):
        # If __file__ is not specified and __main__ is not the module name,
        # then __file__ should be set to the module name.
        filename = warning_tests.__file__
        try:
            del warning_tests.__file__
            with warnings_state(self.module):
                with original_warnings.catch_warnings(record=True,
                        module=self.module) as w:
                    warning_tests.inner("spam8", stacklevel=1)
                    self.assertEqual(w[-1].filename, warning_tests.__name__)
        finally:
            # Restore the attribute so later tests see an intact module.
            warning_tests.__file__ = filename
    @unittest.skipUnless(hasattr(sys, 'argv'), 'test needs sys.argv')
    def test_missing_filename_main_with_argv(self):
        # If __file__ is not specified and the caller is __main__ and sys.argv
        # exists, then use sys.argv[0] as the file.
        filename = warning_tests.__file__
        module_name = warning_tests.__name__
        try:
            del warning_tests.__file__
            warning_tests.__name__ = '__main__'
            with warnings_state(self.module):
                with original_warnings.catch_warnings(record=True,
                        module=self.module) as w:
                    warning_tests.inner('spam9', stacklevel=1)
                    self.assertEqual(w[-1].filename, sys.argv[0])
        finally:
            # Undo the monkey-patching of the helper module.
            warning_tests.__file__ = filename
            warning_tests.__name__ = module_name
    def test_missing_filename_main_without_argv(self):
        # If __file__ is not specified, the caller is __main__, and sys.argv
        # is not set, then '__main__' is the file name.
        filename = warning_tests.__file__
        module_name = warning_tests.__name__
        argv = sys.argv
        try:
            del warning_tests.__file__
            warning_tests.__name__ = '__main__'
            del sys.argv
            with warnings_state(self.module):
                with original_warnings.catch_warnings(record=True,
                        module=self.module) as w:
                    warning_tests.inner('spam10', stacklevel=1)
                    self.assertEqual(w[-1].filename, '__main__')
        finally:
            # Restore everything we deleted/overrode above.
            warning_tests.__file__ = filename
            warning_tests.__name__ = module_name
            sys.argv = argv
    def test_missing_filename_main_with_argv_empty_string(self):
        # If __file__ is not specified, the caller is __main__, and sys.argv[0]
        # is the empty string, then '__main__' is the file name.
        # Tests issue 2743.
        file_name = warning_tests.__file__
        module_name = warning_tests.__name__
        argv = sys.argv
        try:
            del warning_tests.__file__
            warning_tests.__name__ = '__main__'
            sys.argv = ['']
            with warnings_state(self.module):
                with original_warnings.catch_warnings(record=True,
                        module=self.module) as w:
                    warning_tests.inner('spam11', stacklevel=1)
                    self.assertEqual(w[-1].filename, '__main__')
        finally:
            # Restore everything we deleted/overrode above.
            warning_tests.__file__ = file_name
            warning_tests.__name__ = module_name
            sys.argv = argv
    def test_warn_explicit_non_ascii_filename(self):
        # warn_explicit() must preserve non-ASCII (and surrogate) filenames.
        with original_warnings.catch_warnings(record=True,
                module=self.module) as w:
            self.module.resetwarnings()
            self.module.filterwarnings("always", category=UserWarning)
            for filename in ("nonascii\xe9\u20ac", "surrogate\udc80"):
                try:
                    # Skip names the filesystem encoding cannot represent.
                    os.fsencode(filename)
                except UnicodeEncodeError:
                    continue
                self.module.warn_explicit("text", UserWarning, filename, 1)
                self.assertEqual(w[-1].filename, filename)
    def test_warn_explicit_type_errors(self):
        # warn_explicit() should error out gracefully if it is given objects
        # of the wrong types.
        # lineno is expected to be an integer.
        self.assertRaises(TypeError, self.module.warn_explicit,
                            None, UserWarning, None, None)
        # Either 'message' needs to be an instance of Warning or 'category'
        # needs to be a subclass.
        self.assertRaises(TypeError, self.module.warn_explicit,
                            None, None, None, 1)
        # 'registry' must be a dict or None.
        self.assertRaises((TypeError, AttributeError),
                            self.module.warn_explicit,
                            None, Warning, None, 1, registry=42)
    def test_bad_str(self):
        # issue 6415
        # Warnings instance with a bad format string for __str__ should not
        # trigger a bus error; it must surface as a normal ValueError.
        class BadStrWarning(Warning):
            """Warning with a bad format string for __str__."""
            def __str__(self):
                # "%(err)" is an incomplete format spec and raises ValueError.
                return ("A bad formatted string %(err)" %
                        {"err" : "there is no %(err)s"})

        with self.assertRaises(ValueError):
            self.module.warn(BadStrWarning())
    def test_warning_classes(self):
        # Validation of the 'category' argument: only Warning subclasses
        # (or None, meaning UserWarning) are accepted.
        class MyWarningClass(Warning):
            pass

        class NonWarningSubclass:
            pass

        # passing a non-subclass of Warning should raise a TypeError
        with self.assertRaises(TypeError) as cm:
            self.module.warn('bad warning category', '')
        self.assertIn('category must be a Warning subclass, not ',
                      str(cm.exception))

        with self.assertRaises(TypeError) as cm:
            self.module.warn('bad warning category', NonWarningSubclass)
        self.assertIn('category must be a Warning subclass, not ',
                      str(cm.exception))

        # check that warning instances also raise a TypeError
        with self.assertRaises(TypeError) as cm:
            self.module.warn('bad warning category', MyWarningClass())
        self.assertIn('category must be a Warning subclass, not ',
                      str(cm.exception))

        with original_warnings.catch_warnings(module=self.module):
            self.module.resetwarnings()
            self.module.filterwarnings('default')

            with self.assertWarns(MyWarningClass) as cm:
                self.module.warn('good warning category', MyWarningClass)
            self.assertEqual('good warning category', str(cm.warning))

            # category=None defaults to UserWarning.
            with self.assertWarns(UserWarning) as cm:
                self.module.warn('good warning category', None)
            self.assertEqual('good warning category', str(cm.warning))

            with self.assertWarns(MyWarningClass) as cm:
                self.module.warn('good warning category', MyWarningClass)
            self.assertIsInstance(cm.warning, Warning)
class CWarnTests(WarnTests, unittest.TestCase):
    """Run WarnTests against the C-accelerated _warnings implementation."""

    module = c_warnings

    # As an early adopter, we sanity check the
    # test.support.import_fresh_module utility function
    def test_accelerated(self):
        self.assertIsNot(original_warnings, self.module)
        # C functions have no __code__ attribute, unlike pure-Python ones.
        self.assertFalse(hasattr(self.module.warn, '__code__'))
class PyWarnTests(WarnTests, unittest.TestCase):
    """Run WarnTests against the pure-Python warnings implementation."""

    module = py_warnings

    # As an early adopter, we sanity check the
    # test.support.import_fresh_module utility function
    def test_pure_python(self):
        self.assertIsNot(original_warnings, self.module)
        # Pure-Python functions expose a __code__ attribute.
        self.assertTrue(hasattr(self.module.warn, '__code__'))
class WCmdLineTests(BaseTest):
    """Tests for parsing of -W style warning-filter option strings."""

    def test_improper_input(self):
        # Uses the private _setoption() function to test the parsing
        # of command-line warning arguments
        with original_warnings.catch_warnings(module=self.module):
            # Too many ':'-separated fields.
            self.assertRaises(self.module._OptionError,
                              self.module._setoption, '1:2:3:4:5:6')
            # Unknown action.
            self.assertRaises(self.module._OptionError,
                              self.module._setoption, 'bogus::Warning')
            # Negative line number.
            self.assertRaises(self.module._OptionError,
                              self.module._setoption, 'ignore:2::4:-5')
            # A valid option must take effect immediately.
            self.module._setoption('error::Warning::0')
            self.assertRaises(UserWarning, self.module.warn, 'convert to error')
# Run the -W parsing tests against the C-accelerated implementation.
class CWCmdLineTests(WCmdLineTests, unittest.TestCase):
    module = c_warnings
class PyWCmdLineTests(WCmdLineTests, unittest.TestCase):
    """-W parsing tests for the pure-Python implementation, plus subprocess
    checks that only need to run once (hence on this class only)."""

    module = py_warnings

    def test_improper_option(self):
        # Same as above, but check that the message is printed out when
        # the interpreter is executed. This also checks that options are
        # actually parsed at all.
        rc, out, err = assert_python_ok("-Wxxx", "-c", "pass")
        self.assertIn(b"Invalid -W option ignored: invalid action: 'xxx'", err)

    def test_warnings_bootstrap(self):
        # Check that the warnings module does get loaded when -W<some option>
        # is used (see issue #10372 for an example of silent bootstrap failure).
        rc, out, err = assert_python_ok("-Wi", "-c",
            "import sys; sys.modules['warnings'].warn('foo', RuntimeWarning)")
        # '-Wi' was observed
        self.assertFalse(out.strip())
        self.assertNotIn(b'RuntimeWarning', err)
class _WarningsTests(BaseTest, unittest.TestCase):
    """Tests specific to the _warnings module."""

    module = c_warnings

    def test_filter(self):
        # Everything should function even if 'filters' is not in warnings.
        with original_warnings.catch_warnings(module=self.module) as w:
            self.module.filterwarnings("error", "", Warning, "", 0)
            self.assertRaises(UserWarning, self.module.warn,
                                'convert to error')
            del self.module.filters
            # With 'filters' missing, the default behavior still raises.
            self.assertRaises(UserWarning, self.module.warn,
                                'convert to error')

    def test_onceregistry(self):
        # Replacing or removing the onceregistry should be okay.
        global __warningregistry__
        message = UserWarning('onceregistry test')
        try:
            original_registry = self.module.onceregistry
            __warningregistry__ = {}
            with original_warnings.catch_warnings(record=True,
                    module=self.module) as w:
                self.module.resetwarnings()
                self.module.filterwarnings("once", category=UserWarning)
                self.module.warn_explicit(message, UserWarning, "file", 42)
                self.assertEqual(w[-1].message, message)
                del w[:]
                # A second identical warning is suppressed by "once".
                self.module.warn_explicit(message, UserWarning, "file", 42)
                self.assertEqual(len(w), 0)
                # Test the resetting of onceregistry.
                self.module.onceregistry = {}
                __warningregistry__ = {}
                self.module.warn('onceregistry test')
                self.assertEqual(w[-1].message.args, message.args)
                # Removal of onceregistry is okay.
                del w[:]
                del self.module.onceregistry
                __warningregistry__ = {}
                self.module.warn_explicit(message, UserWarning, "file", 42)
                self.assertEqual(len(w), 0)
        finally:
            self.module.onceregistry = original_registry

    def test_default_action(self):
        # Replacing or removing defaultaction should be okay.
        message = UserWarning("defaultaction test")
        original = self.module.defaultaction
        try:
            with original_warnings.catch_warnings(record=True,
                    module=self.module) as w:
                self.module.resetwarnings()
                registry = {}
                self.module.warn_explicit(message, UserWarning, "<test>", 42,
                                            registry=registry)
                self.assertEqual(w[-1].message, message)
                self.assertEqual(len(w), 1)
                # One actual registry key plus the "version" key
                self.assertEqual(len(registry), 2)
                self.assertIn("version", registry)
                del w[:]
                # Test removal.
                del self.module.defaultaction
                __warningregistry__ = {}
                registry = {}
                self.module.warn_explicit(message, UserWarning, "<test>", 43,
                                            registry=registry)
                self.assertEqual(w[-1].message, message)
                self.assertEqual(len(w), 1)
                self.assertEqual(len(registry), 2)
                del w[:]
                # Test setting.
                self.module.defaultaction = "ignore"
                __warningregistry__ = {}
                registry = {}
                self.module.warn_explicit(message, UserWarning, "<test>", 44,
                                            registry=registry)
                self.assertEqual(len(w), 0)
        finally:
            self.module.defaultaction = original

    def test_showwarning_missing(self):
        # Test that showwarning() missing is okay.
        text = 'del showwarning test'
        with original_warnings.catch_warnings(module=self.module):
            self.module.filterwarnings("always", category=UserWarning)
            del self.module.showwarning
            with support.captured_output('stderr') as stream:
                self.module.warn(text)
                result = stream.getvalue()
        self.assertIn(text, result)

    def test_showwarnmsg_missing(self):
        # Test that _showwarnmsg() missing is okay.
        text = 'del _showwarnmsg test'
        with original_warnings.catch_warnings(module=self.module):
            self.module.filterwarnings("always", category=UserWarning)

            show = self.module._showwarnmsg
            try:
                del self.module._showwarnmsg
                with support.captured_output('stderr') as stream:
                    self.module.warn(text)
                    result = stream.getvalue()
            finally:
                self.module._showwarnmsg = show
        self.assertIn(text, result)

    def test_showwarning_not_callable(self):
        with original_warnings.catch_warnings(module=self.module):
            self.module.filterwarnings("always", category=UserWarning)
            self.module.showwarning = print
            with support.captured_output('stdout'):
                self.module.warn('Warning!')
            # A non-callable showwarning must raise TypeError, not crash.
            self.module.showwarning = 23
            self.assertRaises(TypeError, self.module.warn, "Warning!")

    def test_show_warning_output(self):
        # With showwarning() missing, make sure that output is okay.
        text = 'test show_warning'
        with original_warnings.catch_warnings(module=self.module):
            self.module.filterwarnings("always", category=UserWarning)
            del self.module.showwarning
            with support.captured_output('stderr') as stream:
                warning_tests.inner(text)
                result = stream.getvalue()
        self.assertEqual(result.count('\n'), 2,
                             "Too many newlines in %r" % result)
        first_line, second_line = result.split('\n', 1)
        expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
        first_line_parts = first_line.rsplit(':', 3)
        path, line, warning_class, message = first_line_parts
        line = int(line)
        self.assertEqual(expected_file, path)
        self.assertEqual(warning_class, ' ' + UserWarning.__name__)
        self.assertEqual(message, ' ' + text)
        # The second line echoes the offending source line.
        expected_line = '  ' + linecache.getline(path, line).strip() + '\n'
        assert expected_line
        self.assertEqual(second_line, expected_line)

    def test_filename_none(self):
        # issue #12467: race condition if a warning is emitted at shutdown
        globals_dict = globals()
        oldfile = globals_dict['__file__']
        try:
            catch = original_warnings.catch_warnings(record=True,
                                                     module=self.module)
            with catch as w:
                self.module.filterwarnings("always", category=UserWarning)
                globals_dict['__file__'] = None
                original_warnings.warn('test', UserWarning)
                self.assertTrue(len(w))
        finally:
            globals_dict['__file__'] = oldfile

    def test_stderr_none(self):
        # Warnings must be silently dropped when sys.stderr is None.
        rc, stdout, stderr = assert_python_ok("-c",
            "import sys; sys.stderr = None; "
            "import warnings; warnings.simplefilter('always'); "
            "warnings.warn('Warning!')")
        self.assertEqual(stdout, b'')
        self.assertNotIn(b'Warning!', stderr)
        self.assertNotIn(b'Error', stderr)

    def test_issue31285(self):
        # warn_explicit() should neither raise a SystemError nor cause an
        # assertion failure, in case the return value of get_source() has a
        # bad splitlines() method.
        def get_bad_loader(splitlines_ret_val):
            class BadLoader:
                def get_source(self, fullname):
                    class BadSource(str):
                        def splitlines(self):
                            return splitlines_ret_val
                    return BadSource('spam')
            return BadLoader()

        wmod = self.module
        with original_warnings.catch_warnings(module=wmod):
            wmod.filterwarnings('default', category=UserWarning)

            with support.captured_stderr() as stderr:
                wmod.warn_explicit(
                    'foo', UserWarning, 'bar', 1,
                    module_globals={'__loader__': get_bad_loader(42),
                                    '__name__': 'foobar'})
            self.assertIn('UserWarning: foo', stderr.getvalue())

            show = wmod._showwarnmsg
            try:
                del wmod._showwarnmsg
                with support.captured_stderr() as stderr:
                    wmod.warn_explicit(
                        'eggs', UserWarning, 'bar', 1,
                        module_globals={'__loader__': get_bad_loader([42]),
                                        '__name__': 'foobar'})
                self.assertIn('UserWarning: eggs', stderr.getvalue())
            finally:
                wmod._showwarnmsg = show

    @support.cpython_only
    def test_issue31411(self):
        # warn_explicit() shouldn't raise a SystemError in case
        # warnings.onceregistry isn't a dictionary.
        wmod = self.module
        with original_warnings.catch_warnings(module=wmod):
            wmod.filterwarnings('once')
            with support.swap_attr(wmod, 'onceregistry', None):
                with self.assertRaises(TypeError):
                    wmod.warn_explicit('foo', Warning, 'bar', 1, registry=None)

    @support.cpython_only
    def test_issue31416(self):
        # warn_explicit() shouldn't cause an assertion failure in case of a
        # bad warnings.filters or warnings.defaultaction.
        wmod = self.module
        with original_warnings.catch_warnings(module=wmod):
            # Filter action of None is invalid and must raise TypeError.
            wmod.filters = [(None, None, Warning, None, 0)]
            with self.assertRaises(TypeError):
                wmod.warn_explicit('foo', Warning, 'bar', 1)

            wmod.filters = []
            with support.swap_attr(wmod, 'defaultaction', None), \
                 self.assertRaises(TypeError):
                wmod.warn_explicit('foo', Warning, 'bar', 1)

    @support.cpython_only
    def test_issue31566(self):
        # warn() shouldn't cause an assertion failure in case of a bad
        # __name__ global.
        with original_warnings.catch_warnings(module=self.module):
            self.module.filterwarnings('error', category=UserWarning)
            with support.swap_item(globals(), '__name__', b'foo'), \
                 support.swap_item(globals(), '__file__', None):
                self.assertRaises(UserWarning, self.module.warn, 'bar')
class WarningsDisplayTests(BaseTest):
    """Test the displaying of warnings and the ability to overload functions
    related to displaying warnings."""

    def test_formatwarning(self):
        message = "msg"
        category = Warning
        file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
        line_num = 3
        file_line = linecache.getline(file_name, line_num).strip()
        # Expected layout: "file:line: Category: message\n  source-line\n".
        format = "%s:%s: %s: %s\n  %s\n"
        expect = format % (file_name, line_num, category.__name__, message,
                            file_line)
        self.assertEqual(expect, self.module.formatwarning(message,
                                                category, file_name, line_num))
        # Test the 'line' argument.
        file_line += " for the win!"
        expect = format % (file_name, line_num, category.__name__, message,
                            file_line)
        self.assertEqual(expect, self.module.formatwarning(message,
                                    category, file_name, line_num, file_line))

    def test_showwarning(self):
        file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
        line_num = 3
        expected_file_line = linecache.getline(file_name, line_num).strip()
        message = 'msg'
        category = Warning
        file_object = StringIO()
        expect = self.module.formatwarning(message, category, file_name,
                                            line_num)
        # showwarning() must write exactly what formatwarning() produces.
        self.module.showwarning(message, category, file_name, line_num,
                                file_object)
        self.assertEqual(file_object.getvalue(), expect)
        # Test 'line' argument.
        expected_file_line += "for the win!"
        expect = self.module.formatwarning(message, category, file_name,
                                            line_num, expected_file_line)
        file_object = StringIO()
        self.module.showwarning(message, category, file_name, line_num,
                                file_object, expected_file_line)
        self.assertEqual(expect, file_object.getvalue())
# Run the display tests against the C-accelerated implementation.
class CWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
    module = c_warnings
class PyWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
    """Display tests for the pure-Python implementation, plus tracemalloc
    integration (which only needs to run once)."""

    module = py_warnings

    def test_tracemalloc(self):
        # With -X tracemalloc=2, a ResourceWarning should be followed by the
        # allocation traceback of the leaked object.
        self.addCleanup(support.unlink, support.TESTFN)

        with open(support.TESTFN, 'w') as fp:
            fp.write(textwrap.dedent("""
                def func():
                    f = open(__file__)
                    # Emit ResourceWarning
                    f = None

                func()
                """))

        res = assert_python_ok('-Wd', '-X', 'tracemalloc=2', support.TESTFN)

        stderr = res.err.decode('ascii', 'replace')
        # normalize newlines
        stderr = '\n'.join(stderr.splitlines())
        # The repr of the leaked file object varies; mask it out.
        stderr = re.sub('<.*>', '<...>', stderr)

        expected = textwrap.dedent('''
            {fname}:5: ResourceWarning: unclosed file <...>
              f = None
            Object allocated at (most recent call last):
              File "{fname}", lineno 7
                func()
              File "{fname}", lineno 3
                f = open(__file__)
        ''')
        expected = expected.format(fname=support.TESTFN).strip()
        self.assertEqual(stderr, expected)
class CatchWarningTests(BaseTest):
    """Test catch_warnings()."""

    def test_catch_warnings_restore(self):
        wmod = self.module
        orig_filters = wmod.filters
        orig_showwarning = wmod.showwarning
        # Ensure both showwarning and filters are restored when recording
        with wmod.catch_warnings(module=wmod, record=True):
            wmod.filters = wmod.showwarning = object()
        self.assertIs(wmod.filters, orig_filters)
        self.assertIs(wmod.showwarning, orig_showwarning)
        # Same test, but with recording disabled
        with wmod.catch_warnings(module=wmod, record=False):
            wmod.filters = wmod.showwarning = object()
        self.assertIs(wmod.filters, orig_filters)
        self.assertIs(wmod.showwarning, orig_showwarning)

    def test_catch_warnings_recording(self):
        wmod = self.module
        # Ensure warnings are recorded when requested
        with wmod.catch_warnings(module=wmod, record=True) as w:
            self.assertEqual(w, [])
            self.assertIs(type(w), list)
            wmod.simplefilter("always")
            wmod.warn("foo")
            self.assertEqual(str(w[-1].message), "foo")
            wmod.warn("bar")
            self.assertEqual(str(w[-1].message), "bar")
            self.assertEqual(str(w[0].message), "foo")
            self.assertEqual(str(w[1].message), "bar")
            del w[:]
            self.assertEqual(w, [])
        # Ensure warnings are not recorded when not requested
        orig_showwarning = wmod.showwarning
        with wmod.catch_warnings(module=wmod, record=False) as w:
            self.assertIsNone(w)
            self.assertIs(wmod.showwarning, orig_showwarning)

    def test_catch_warnings_reentry_guard(self):
        wmod = self.module
        # Ensure catch_warnings is protected against incorrect usage
        x = wmod.catch_warnings(module=wmod, record=True)
        # __exit__ before __enter__ must fail.
        self.assertRaises(RuntimeError, x.__exit__)
        with x:
            # Re-entering an active manager must fail too.
            self.assertRaises(RuntimeError, x.__enter__)
        # Same test, but with recording disabled
        x = wmod.catch_warnings(module=wmod, record=False)
        self.assertRaises(RuntimeError, x.__exit__)
        with x:
            self.assertRaises(RuntimeError, x.__enter__)

    def test_catch_warnings_defaults(self):
        wmod = self.module
        orig_filters = wmod.filters
        orig_showwarning = wmod.showwarning
        # Ensure default behaviour is not to record warnings
        with wmod.catch_warnings(module=wmod) as w:
            self.assertIsNone(w)
            self.assertIs(wmod.showwarning, orig_showwarning)
            # Inside the block, filters is a private copy...
            self.assertIsNot(wmod.filters, orig_filters)
        # ...and the original object is restored on exit.
        self.assertIs(wmod.filters, orig_filters)
        if wmod is sys.modules['warnings']:
            # Ensure the default module is this one
            with wmod.catch_warnings() as w:
                self.assertIsNone(w)
                self.assertIs(wmod.showwarning, orig_showwarning)
                self.assertIsNot(wmod.filters, orig_filters)
            self.assertIs(wmod.filters, orig_filters)

    def test_record_override_showwarning_before(self):
        # Issue #28835: If warnings.showwarning() was overridden, make sure
        # that catch_warnings(record=True) overrides it again.
        text = "This is a warning"
        wmod = self.module
        my_log = []

        def my_logger(message, category, filename, lineno, file=None, line=None):
            nonlocal my_log
            my_log.append(message)

        # Override warnings.showwarning() before calling catch_warnings()
        with support.swap_attr(wmod, 'showwarning', my_logger):
            with wmod.catch_warnings(module=wmod, record=True) as log:
                self.assertIsNot(wmod.showwarning, my_logger)

                wmod.simplefilter("always")
                wmod.warn(text)

            self.assertIs(wmod.showwarning, my_logger)

        self.assertEqual(len(log), 1, log)
        self.assertEqual(log[0].message.args[0], text)
        # The custom logger must not have seen the warning.
        self.assertEqual(my_log, [])

    def test_record_override_showwarning_inside(self):
        # Issue #28835: It is possible to override warnings.showwarning()
        # in the catch_warnings(record=True) context manager.
        text = "This is a warning"
        wmod = self.module
        my_log = []

        def my_logger(message, category, filename, lineno, file=None, line=None):
            nonlocal my_log
            my_log.append(message)

        with wmod.catch_warnings(module=wmod, record=True) as log:
            wmod.simplefilter("always")
            wmod.showwarning = my_logger
            wmod.warn(text)

        # The override wins: the message goes to my_log, not the record list.
        self.assertEqual(len(my_log), 1, my_log)
        self.assertEqual(my_log[0].args[0], text)
        self.assertEqual(log, [])

    def test_check_warnings(self):
        # Explicit tests for the test.support convenience wrapper
        wmod = self.module
        if wmod is not sys.modules['warnings']:
            self.skipTest('module to test is not loaded warnings module')
        with support.check_warnings(quiet=False) as w:
            self.assertEqual(w.warnings, [])
            wmod.simplefilter("always")
            wmod.warn("foo")
            self.assertEqual(str(w.message), "foo")
            wmod.warn("bar")
            self.assertEqual(str(w.message), "bar")
            self.assertEqual(str(w.warnings[0].message), "foo")
            self.assertEqual(str(w.warnings[1].message), "bar")
            w.reset()
            self.assertEqual(w.warnings, [])

        with support.check_warnings():
            # defaults to quiet=True without argument
            pass
        with support.check_warnings(('foo', UserWarning)):
            wmod.warn("foo")

        with self.assertRaises(AssertionError):
            with support.check_warnings(('', RuntimeWarning)):
                # defaults to quiet=False with argument
                pass
        with self.assertRaises(AssertionError):
            with support.check_warnings(('foo', RuntimeWarning)):
                wmod.warn("foo")
# Run catch_warnings() tests against the C-accelerated implementation.
class CCatchWarningTests(CatchWarningTests, unittest.TestCase):
    module = c_warnings
# Run catch_warnings() tests against the pure-Python implementation.
class PyCatchWarningTests(CatchWarningTests, unittest.TestCase):
    module = py_warnings
class EnvironmentVariableTests(BaseTest):
    """Tests for the PYTHONWARNINGS environment variable and its interaction
    with -W command-line options (all run in subprocesses)."""

    def test_single_warning(self):
        rc, stdout, stderr = assert_python_ok("-c",
            "import sys; sys.stdout.write(str(sys.warnoptions))",
            PYTHONWARNINGS="ignore::DeprecationWarning",
            PYTHONDEVMODE="")
        self.assertEqual(stdout, b"['ignore::DeprecationWarning']")

    def test_comma_separated_warnings(self):
        # PYTHONWARNINGS accepts multiple comma-separated filters.
        rc, stdout, stderr = assert_python_ok("-c",
            "import sys; sys.stdout.write(str(sys.warnoptions))",
            PYTHONWARNINGS="ignore::DeprecationWarning,ignore::UnicodeWarning",
            PYTHONDEVMODE="")
        self.assertEqual(stdout,
            b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")

    def test_envvar_and_command_line(self):
        # Environment filters come first, then -W options.
        rc, stdout, stderr = assert_python_ok("-Wignore::UnicodeWarning", "-c",
            "import sys; sys.stdout.write(str(sys.warnoptions))",
            PYTHONWARNINGS="ignore::DeprecationWarning",
            PYTHONDEVMODE="")
        self.assertEqual(stdout,
            b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")

    def test_conflicting_envvar_and_command_line(self):
        # -W options take precedence over PYTHONWARNINGS (applied last).
        rc, stdout, stderr = assert_python_failure("-Werror::DeprecationWarning", "-c",
            "import sys, warnings; sys.stdout.write(str(sys.warnoptions)); "
            "warnings.warn('Message', DeprecationWarning)",
            PYTHONWARNINGS="default::DeprecationWarning",
            PYTHONDEVMODE="")
        self.assertEqual(stdout,
            b"['default::DeprecationWarning', 'error::DeprecationWarning']")
        self.assertEqual(stderr.splitlines(),
            [b"Traceback (most recent call last):",
             b"  File \"<string>\", line 1, in <module>",
             b"DeprecationWarning: Message"])

    def test_default_filter_configuration(self):
        pure_python_api = self.module is py_warnings
        if Py_DEBUG:
            # In debug builds, no filters are installed by default.
            expected_default_filters = []
        else:
            if pure_python_api:
                main_module_filter = re.compile("__main__")
            else:
                main_module_filter = "__main__"
            expected_default_filters = [
                ('default', None, DeprecationWarning, main_module_filter, 0),
                ('ignore', None, DeprecationWarning, None, 0),
                ('ignore', None, PendingDeprecationWarning, None, 0),
                ('ignore', None, ImportWarning, None, 0),
                ('ignore', None, ResourceWarning, None, 0),
            ]
        expected_output = [str(f).encode() for f in expected_default_filters]

        if pure_python_api:
            # Disable the warnings acceleration module in the subprocess
            code = "import sys; sys.modules.pop('warnings', None); sys.modules['_warnings'] = None; "
        else:
            code = ""
        code += "import warnings; [print(f) for f in warnings.filters]"

        rc, stdout, stderr = assert_python_ok("-c", code, __isolated=True)
        stdout_lines = [line.strip() for line in stdout.splitlines()]
        self.maxDiff = None
        self.assertEqual(stdout_lines, expected_output)

    @unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
                         'requires non-ascii filesystemencoding')
    def test_nonascii(self):
        # Non-ASCII text in PYTHONWARNINGS must round-trip intact.
        rc, stdout, stderr = assert_python_ok("-c",
            "import sys; sys.stdout.write(str(sys.warnoptions))",
            PYTHONIOENCODING="utf-8",
            PYTHONWARNINGS="ignore:DeprecaciónWarning",
            PYTHONDEVMODE="")
        self.assertEqual(stdout,
            "['ignore:DeprecaciónWarning']".encode('utf-8'))
# Run the environment-variable tests against the C implementation.
class CEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
    module = c_warnings
# Run the environment-variable tests against the pure-Python implementation.
class PyEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
    module = py_warnings
class BootstrapTest(unittest.TestCase):
    """Interpreter bootstrap must survive a shadowing 'encodings' package."""

    def test_issue_8766(self):
        # "import encodings" emits a warning whereas the warnings is not loaded
        # or not completely loaded (warnings imports indirectly encodings by
        # importing linecache) yet
        with support.temp_cwd() as cwd, support.temp_cwd('encodings'):
            # encodings loaded by initfsencoding()
            assert_python_ok('-c', 'pass', PYTHONPATH=cwd)

            # Use -W to load warnings module at startup
            assert_python_ok('-c', 'pass', '-W', 'always', PYTHONPATH=cwd)
class FinalizationTest(unittest.TestCase):
    """Warnings emitted during interpreter shutdown must not crash."""

    @support.requires_type_collecting
    def test_finalization(self):
        # Issue #19421: warnings.warn() should not crash
        # during Python finalization
        code = """
import warnings
warn = warnings.warn

class A:
    def __del__(self):
        warn("test")

a=A()
"""
        rc, out, err = assert_python_ok("-c", code)
        # note: "__main__" filename is not correct, it should be the name
        # of the script
        self.assertEqual(err.decode(), '__main__:7: UserWarning: test')

    def test_late_resource_warning(self):
        # Issue #21925: Emitting a ResourceWarning late during the Python
        # shutdown must be logged.
        expected = b"sys:1: ResourceWarning: unclosed file "

        # don't import the warnings module
        # (_warnings will try to import it)
        code = "f = open(%a)" % __file__
        rc, out, err = assert_python_ok("-Wd", "-c", code)
        self.assertTrue(err.startswith(expected), ascii(err))

        # import the warnings module
        code = "import warnings; f = open(%a)" % __file__
        rc, out, err = assert_python_ok("-Wd", "-c", code)
        self.assertTrue(err.startswith(expected), ascii(err))
def setUpModule():
    """Reset both implementations' "once" registries before the run.

    Stale entries from an earlier run could otherwise suppress warnings
    that these tests expect to observe.
    """
    for warnings_impl in (py_warnings, c_warnings):
        warnings_impl.onceregistry.clear()
# Teardown repeats the same registry cleanup performed at setup.
tearDownModule = setUpModule

if __name__ == "__main__":
    unittest.main()
| 41.964899 | 101 | 0.599881 |
d7394a8e981308f556b64f7a841fe2a770e2952c | 573 | py | Python | lib/python/tests/test_all.py | onexeno/plow | 5c19c78ce0579f624cc774ac260f3178286ccb07 | [
"Apache-2.0"
] | null | null | null | lib/python/tests/test_all.py | onexeno/plow | 5c19c78ce0579f624cc774ac260f3178286ccb07 | [
"Apache-2.0"
] | 3 | 2020-05-15T21:01:33.000Z | 2021-12-09T20:25:17.000Z | lib/python/tests/test_all.py | onexeno/plow | 5c19c78ce0579f624cc774ac260f3178286ccb07 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import unittest
import logging
logging.basicConfig(level=logging.INFO)
# Dotted names of all test modules/classes to load, grouped by component.
TESTS = []

# rndaemon tests
_RNDAEMON_TEST_NAMES = (
    'test_profile',
    'test_run.TestCommunications',
    'test_run.TestResourceManager',
    'test_run.TestProcessManager',
)
TESTS += ['plowapp.rndaemon.test.' + name for name in _RNDAEMON_TEST_NAMES]
def additional_tests():
    """Build and return a TestSuite from the dotted names listed in TESTS.

    Named ``additional_tests`` so setuptools' test collection picks it up.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromNames(TESTS))
    return suite
if __name__ == "__main__":
    # Allow running the whole suite directly: ``python test_all.py``.
    suite = additional_tests()
    unittest.TextTestRunner(verbosity=2).run(suite)
a275513342534c84ebbae58dada0934a5d15d2e1 | 407 | py | Python | coding/learn_python/context_manager/ContextDecorator_demo.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_python/context_manager/ContextDecorator_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_python/context_manager/ContextDecorator_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from contextlib import ContextDecorator
class mycontext(ContextDecorator):
def __enter__(self):
print('Starting')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print('Finishing')
return False
@mycontext()
def function():
print("The bit in the middle")
function()
with mycontext():
print("The bit in the middle")
| 15.653846 | 50 | 0.641278 |
5a7a617f7dd3d3743731613f07b9124e7a143fdf | 7,025 | py | Python | napari/utils/perf/_timers.py | chili-chiu/napari | eb6e672975ce105ac0125f71da3d0970d17cefb9 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/utils/perf/_timers.py | chili-chiu/napari | eb6e672975ce105ac0125f71da3d0970d17cefb9 | [
"BSD-3-Clause"
] | 120 | 2018-09-04T22:05:13.000Z | 2019-03-02T01:13:57.000Z | napari/utils/perf/_timers.py | chili-chiu/napari | eb6e672975ce105ac0125f71da3d0970d17cefb9 | [
"BSD-3-Clause"
] | 8 | 2018-09-04T21:48:26.000Z | 2019-01-29T04:48:30.000Z | """PerfTimers class and global instance.
"""
import contextlib
import os
from time import perf_counter_ns
from typing import Dict, Optional
from ._event import PerfEvent
from ._stat import Stat
from ._trace_file import PerfTraceFile
USE_PERFMON = os.getenv("NAPARI_PERFMON", "0") != "0"
class PerfTimers:
    """Timers for performance monitoring.

    Timers are best added using the perfmon config file, which will
    monkey-patch the timers into the code at startup. See
    napari.utils.perf._config for details.

    The collected timing information can be used in two ways:
    1) Writing a JSON trace file in Chrome's Tracing format.
    2) Napari's real-time QtPerformance widget.

    Attributes
    ----------
    timers : Dict[str, Stat]
        Per-timer statistics, keyed by timer name.
    trace_file : Optional[PerfTraceFile]
        The tracing file we are writing to, if any.

    Notes
    -----
    Chrome deduces nesting based on the start and end times of each timer. The
    chrome://tracing GUI shows the nesting as stacks of colored rectangles.
    However our self.timers dictionary and thus our QtPerformance widget do not
    currently understand nesting. So if they say two timers each took 1ms, you
    can't tell if one called the other or not.

    Despite this limitation, when the QtPerformance widget reports slow timers
    it still gives a good idea of what was slow, and the chrome://tracing GUI
    can then be used to see the full story.
    """

    def __init__(self):
        """Create PerfTimers with no statistics and tracing turned off."""
        # One Stat object per timer name.
        self.timers: Dict[str, Stat] = {}
        # Menu item "Debug -> Record Trace File..." starts a trace.
        self.trace_file: Optional[PerfTraceFile] = None

    def add_event(self, event: PerfEvent) -> None:
        """Add one performance event.

        Parameters
        ----------
        event : PerfEvent
            Add this event.
        """
        # Stream the event to disk when a trace is being recorded.
        if self.trace_file is not None:
            self.trace_file.add_event(event)

        # Only Complete Events ("X") carry a duration worth accumulating.
        if event.phase != "X":
            return

        # Fold the duration (milliseconds) into the per-name statistics.
        existing = self.timers.get(event.name)
        if existing is None:
            self.timers[event.name] = Stat(event.duration_ms)
        else:
            existing.add(event.duration_ms)

    def add_instant_event(self, name: str, **kwargs) -> None:
        """Add one instant event.

        Parameters
        ----------
        name : str
            Name of the instant event.
        **kwargs
            Arguments to display in the Args section of the Tracing GUI.
        """
        timestamp = perf_counter_ns()
        self.add_event(PerfEvent(name, timestamp, timestamp, phase="I", **kwargs))

    def add_counter_event(self, name: str, **kwargs: Dict[str, float]) -> None:
        """Add one counter event.

        Parameters
        ----------
        name : str
            The name of this event like "draw".
        **kwargs : Dict[str, float]
            The individual counters for this event.

        Notes
        -----
        For example add_counter_event("draw", triangles=5, squares=10).
        """
        timestamp = perf_counter_ns()
        self.add_event(PerfEvent(name, timestamp, timestamp, phase="C", **kwargs))

    def clear(self):
        """Discard all accumulated timer statistics.

        The GUI calls this after displaying timing information so that fresh
        information starts accumulating.
        """
        self.timers.clear()

    def start_trace_file(self, path: str) -> None:
        """Start recording a trace file to disk.

        Parameters
        ----------
        path : str
            Write the trace to this path.
        """
        self.trace_file = PerfTraceFile(path)

    def stop_trace_file(self) -> None:
        """Stop recording a trace file."""
        if self.trace_file is not None:
            self.trace_file.close()
            self.trace_file = None
@contextlib.contextmanager
def block_timer(
    name: str,
    category: Optional[str] = None,
    print_time: bool = False,
    **kwargs,
):
    """Time a block of code.

    block_timer can be used when perfmon is disabled. Use perf_timer instead
    if you want your timer to do nothing when perfmon is disabled.

    Notes
    -----
    Most of the time you should use the perfmon config file to monkey-patch
    perf_timer's into methods and functions. Then you do not need to use
    block_timer or perf_timer context objects explicitly at all.

    Parameters
    ----------
    name : str
        The name of this timer.
    category : Optional[str]
        Comma separated categories such has "render,update".
    print_time : bool
        Print the duration of the timer when it finishes.
    **kwargs : dict
        Additional keyword arguments for the "args" field of the event.

    Examples
    --------
    .. code-block:: python

        with block_timer("draw") as event:
            draw_stuff()
        print(f"The timer took {event.duration_ms} milliseconds.")
    """
    start_ns = perf_counter_ns()

    # Pass in start_ns for both start and end; update_end_ns fixes the end
    # time once the block has finished.
    event = PerfEvent(name, start_ns, start_ns, category, **kwargs)
    try:
        yield event
    finally:
        # BUG FIX: previously there was no try/finally, so an exception
        # raised inside the timed block skipped update_end_ns/add_event and
        # the timing was silently lost. Record the event in all cases; the
        # exception still propagates to the caller.
        event.update_end_ns(perf_counter_ns())
        if timers:
            timers.add_event(event)
        if print_time:
            print(f"{name} {event.duration_ms:.3f}ms")
def _create_timer():
    # Build the one global PerfTimers instance together with the
    # module-level convenience API that forwards to it.
    timers = PerfTimers()

    def add_instant_event(name: str, **kwargs):
        """Add one instant event.

        Parameters
        ----------
        name : str
            Name of the instant event.
        **kwargs
            Arguments to display in the Args section of the Chrome Tracing GUI.
        """
        timers.add_instant_event(name, **kwargs)

    def add_counter_event(name: str, **kwargs: Dict[str, float]):
        """Add one counter event.

        Parameters
        ----------
        name : str
            The name of this event like "draw".
        **kwargs : Dict[str, float]
            The individual counters for this event.

        Notes
        -----
        For example add_counter_event("draw", triangles=5, squares=10).
        """
        timers.add_counter_event(name, **kwargs)

    # With perfmon enabled, perf_timer is simply block_timer.
    perf_timer = block_timer

    return timers, perf_timer, add_instant_event, add_counter_event
if USE_PERFMON:
    # Perfmon is enabled: create the real global timers and the module-level
    # functions that forward to them.
    timers, perf_timer, add_instant_event, add_counter_event = _create_timer()
else:
    # Make sure no one accesses the timers when they are disabled.
    timers = None

    def add_instant_event(name: str, **kwargs) -> None:
        # No-op stub used when perfmon is disabled.
        pass

    def add_counter_event(name: str, **kwargs: Dict[str, float]) -> None:
        # No-op stub used when perfmon is disabled.
        pass

    # perf_timer is disabled. Using contextlib.nullcontext did not work.
    @contextlib.contextmanager
    def perf_timer(name: str, category: Optional[str] = None, **kwargs):
        yield
| 29.393305 | 80 | 0.62121 |
2d88cd9613d993b5b6a450a78df3d9f8806e28b7 | 7,830 | py | Python | VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py | opnfv/samplevnf | 75597545ef748d7113591cdbfc8d1aaa88cf15cb | [
"Apache-2.0"
] | 19 | 2017-10-13T11:14:19.000Z | 2022-02-13T12:26:42.000Z | VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py | opnfv/samplevnf | 75597545ef748d7113591cdbfc8d1aaa88cf15cb | [
"Apache-2.0"
] | 1 | 2022-01-25T12:33:52.000Z | 2022-01-25T12:33:52.000Z | VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py | opnfv/samplevnf | 75597545ef748d7113591cdbfc8d1aaa88cf15cb | [
"Apache-2.0"
] | 22 | 2017-09-21T01:54:35.000Z | 2021-11-07T06:40:11.000Z | ##
## Copyright (c) 2019-2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
import sys
from kubernetes import client, config
try:
import configparser
except ImportError:
# Python 2.x fallback
import ConfigParser as configparser
import logging
from logging import handlers
from rapid_k8s_pod import Pod
class K8sDeployment:
    """Deployment class to create containers for test execution in a
    Kubernetes environment.
    """

    LOG_FILE_NAME = "createrapidk8s.log"
    SSH_PRIVATE_KEY = "./rapid_rsa_key"
    SSH_USER = "centos"
    POD_YAML_TEMPLATE_FILE_NAME = "pod-rapid.yaml"

    _log = None
    _create_config = None
    _runtime_config = None
    _total_number_of_pods = 0
    # Class-level default kept for backward compatibility; each instance gets
    # its own list in __init__ (see BUG FIX note there).
    _pods = []

    def __init__(self):
        # BUG FIX: _pods used to be appended to as a class attribute, so
        # every K8sDeployment instance shared a single pod list. Give each
        # instance its own list instead.
        self._pods = []
        self._total_number_of_pods = 0

        # Configure logger: everything to a rotating file, plus a bare
        # message-only echo on stdout.
        self._log = logging.getLogger("k8srapid")
        self._log.setLevel(logging.DEBUG)
        if not self._log.handlers:
            # Guard against attaching duplicate handlers (and thus duplicated
            # log lines) when more than one K8sDeployment is created in the
            # same process.
            console_formatter = logging.Formatter("%(message)s")
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setLevel(logging.DEBUG)
            console_handler.setFormatter(console_formatter)

            file_formatter = logging.Formatter("%(asctime)s - "
                                               "%(levelname)s - "
                                               "%(message)s")
            file_handler = logging.handlers.RotatingFileHandler(
                self.LOG_FILE_NAME, backupCount=10)
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(file_formatter)

            self._log.addHandler(file_handler)
            self._log.addHandler(console_handler)

        # Initialize k8s plugin.
        config.load_kube_config()
        Pod.k8s_CoreV1Api = client.CoreV1Api()

    def _get_create_option(self, section, option, default=None):
        """Return one option from the create config, or `default` if the
        option is absent from that section."""
        if self._create_config.has_option(section, option):
            return self._create_config.get(section, option)
        return default

    def _parse_pod_section(self, index):
        """Build one Pod object from the [POD<index>] config section."""
        section = "POD%d" % index
        pod_name = self._get_create_option(section, "name",
                                           "pod-rapid-%d" % index)
        pod_nodeselector_hostname = self._get_create_option(
            section, "nodeSelector_hostname")
        pod_dp_ip = self._get_create_option(section, "dp_ip")
        pod_dp_subnet = self._get_create_option(section, "dp_subnet", "24")

        pod = Pod(pod_name)
        pod.set_nodeselector(pod_nodeselector_hostname)
        pod.set_dp_ip(pod_dp_ip)
        pod.set_dp_subnet(pod_dp_subnet)
        pod.set_id(index)
        return pod

    def load_create_config(self, config_file_name):
        """Read and parse the configuration file for the test environment.

        Parameters
        ----------
        config_file_name : str
            Path of the create config file.

        Returns
        -------
        int
            0 on success, -1 on error (missing options or unreadable file).
        """
        self._log.info("Loading configuration file %s", config_file_name)
        self._create_config = configparser.RawConfigParser()
        try:
            self._create_config.read(config_file_name)
        except Exception as e:
            self._log.error("Failed to read config file!\n%s\n", e)
            return -1

        # Parse [DEFAULT] section: the pod count is mandatory.
        if not self._create_config.has_option("DEFAULT",
                                              "total_number_of_pods"):
            self._log.error("No option total_number_of_pods in DEFAULT section")
            return -1
        self._total_number_of_pods = self._create_config.getint(
            "DEFAULT", "total_number_of_pods")
        self._log.debug("Total number of pods %d", self._total_number_of_pods)

        # Parse [PODx] sections. Rebuild the list from scratch so reloading a
        # config does not accumulate pods from a previous load.
        self._pods = [self._parse_pod_section(i)
                      for i in range(1, self._total_number_of_pods + 1)]
        return 0

    def create_pods(self):
        """Create test PODs, wait for them to start, and collect the
        information the tests need to run."""
        self._log.info("Creating PODs...")

        # Create PODs using the template from the yaml file.
        for pod in self._pods:
            self._log.info("Creating POD %s...", pod.get_name())
            pod.create_from_yaml(K8sDeployment.POD_YAML_TEMPLATE_FILE_NAME)

        # Wait for PODs to start.
        for pod in self._pods:
            pod.wait_for_start()

        # Collect information from started PODs for test execution.
        for pod in self._pods:
            pod.set_ssh_credentials(K8sDeployment.SSH_USER,
                                    K8sDeployment.SSH_PRIVATE_KEY)
            pod.get_sriov_dev_mac()

    def save_runtime_config(self, config_file_name):
        """Write the config file consumed by the runrapid script.

        Parameters
        ----------
        config_file_name : str
            Path of the runtime config file to write.
        """
        self._log.info("Saving config %s for runrapid script...",
                       config_file_name)
        self._runtime_config = configparser.RawConfigParser()

        # Section [ssh]: credentials used to reach the PODs.
        self._runtime_config.add_section("ssh")
        self._runtime_config.set("ssh",
                                 "key",
                                 K8sDeployment.SSH_PRIVATE_KEY)
        self._runtime_config.set("ssh",
                                 "user",
                                 K8sDeployment.SSH_USER)

        # Section [rapid]: machine count.
        self._runtime_config.add_section("rapid")
        # BUG FIX: Python 3's RawConfigParser.set() accepts only string
        # values; the pod count is an int, which raised TypeError here.
        # Convert explicitly (also harmless under the Python 2 fallback).
        self._runtime_config.set("rapid",
                                 "total_number_of_machines",
                                 str(self._total_number_of_pods))

        # Sections [Mx]: per-POD connection details.
        for pod in self._pods:
            section = "M%d" % pod.get_id()
            self._runtime_config.add_section(section)
            self._runtime_config.set(section,
                                     "admin_ip", pod.get_admin_ip())
            self._runtime_config.set(section,
                                     "dp_mac1", pod.get_dp_mac())
            self._runtime_config.set(section,
                                     "dp_pci_dev", pod.get_dp_pci_dev())
            self._runtime_config.set(section,
                                     "dp_ip1", pod.get_dp_ip() + "/" +
                                     pod.get_dp_subnet())

        # Section [Varia]: deployment type marker.
        self._runtime_config.add_section("Varia")
        self._runtime_config.set("Varia",
                                 "vim",
                                 "kubernetes")

        # Write runtime config file.
        with open(config_file_name, "w") as file:
            self._runtime_config.write(file)

    def delete_pods(self):
        """Terminate all PODs created by create_pods()."""
        for pod in self._pods:
            pod.terminate()
| 37.464115 | 90 | 0.565517 |
d78cb5c8aa922ddaed3d5840193c55bc16dfed3a | 32,837 | py | Python | selectel_dns_api/apis/domains_api.py | nwton/fork_mdsina_selectel-dns-api | 30b02260a3bf86e0fbbafad372292aafb13206ee | [
"Apache-2.0"
] | null | null | null | selectel_dns_api/apis/domains_api.py | nwton/fork_mdsina_selectel-dns-api | 30b02260a3bf86e0fbbafad372292aafb13206ee | [
"Apache-2.0"
] | null | null | null | selectel_dns_api/apis/domains_api.py | nwton/fork_mdsina_selectel-dns-api | 30b02260a3bf86e0fbbafad372292aafb13206ee | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Selectel DNS API
Simple Selectel DNS API.
OpenAPI spec version: 1.0.0
Contact: info@mdsina.ru
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DomainsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_domain(self, body, **kwargs):
"""
Create new domain
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_domain(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param NewDomain body: Domain info for creation (required)
:return: Domain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_domain_with_http_info(body, **kwargs)
else:
(data) = self.add_domain_with_http_info(body, **kwargs)
return data
    def add_domain_with_http_info(self, body, **kwargs):
        """
        Create new domain
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.add_domain_with_http_info(body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param NewDomain body: Domain info for creation (required)
        :return: Domain
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the generic
        # request-control options understood by ApiClient.call_api().
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_domain" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_domain`")

        collection_formats = {}

        # Build the request: URL path, query, headers, form and body parts.
        resource_path = '/'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        # The new-domain payload is sent as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Domain',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_domain(self, domain_id, **kwargs):
"""
Deletes a domain
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_domain(domain_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int domain_id: ID of domain to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_domain_with_http_info(domain_id, **kwargs)
else:
(data) = self.delete_domain_with_http_info(domain_id, **kwargs)
return data
    def delete_domain_with_http_info(self, domain_id, **kwargs):
        """
        Deletes a domain
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.delete_domain_with_http_info(domain_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int domain_id: ID of domain to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the generic
        # request-control options understood by ApiClient.call_api().
        all_params = ['domain_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_domain" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'domain_id' is set
        if ('domain_id' not in params) or (params['domain_id'] is None):
            raise ValueError("Missing the required parameter `domain_id` when calling `delete_domain`")

        collection_formats = {}

        # Build the request: the domain id is substituted into the URL path.
        resource_path = '/{domain_id}'.replace('{format}', 'json')
        path_params = {}
        if 'domain_id' in params:
            path_params['domain_id'] = params['domain_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_domain_by_id(self, domain_id, **kwargs):
"""
Find domain by ID
Returns a single domain
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_domain_by_id(domain_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int domain_id: ID of domain to return (required)
:return: Domain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_domain_by_id_with_http_info(domain_id, **kwargs)
else:
(data) = self.get_domain_by_id_with_http_info(domain_id, **kwargs)
return data
    def get_domain_by_id_with_http_info(self, domain_id, **kwargs):
        """
        Find domain by ID
        Returns a single domain
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_domain_by_id_with_http_info(domain_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int domain_id: ID of domain to return (required)
        :return: Domain
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the generic
        # request-control options understood by ApiClient.call_api().
        all_params = ['domain_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_domain_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'domain_id' is set
        if ('domain_id' not in params) or (params['domain_id'] is None):
            raise ValueError("Missing the required parameter `domain_id` when calling `get_domain_by_id`")

        collection_formats = {}

        # Build the request: the domain id is substituted into the URL path.
        resource_path = '/{domain_id}'.replace('{format}', 'json')
        path_params = {}
        if 'domain_id' in params:
            path_params['domain_id'] = params['domain_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Domain',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_domain_by_name(self, domain_name, **kwargs):
"""
Find domain by name
Returns a single domain
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_domain_by_name(domain_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str domain_name: name of domain to return (required)
:return: Domain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_domain_by_name_with_http_info(domain_name, **kwargs)
else:
(data) = self.get_domain_by_name_with_http_info(domain_name, **kwargs)
return data
    def get_domain_by_name_with_http_info(self, domain_name, **kwargs):
        """
        Find domain by name
        Returns a single domain
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_domain_by_name_with_http_info(domain_name, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str domain_name: name of domain to return (required)
        :return: Domain
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the generic
        # request-control options understood by ApiClient.call_api().
        all_params = ['domain_name']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_domain_by_name" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'domain_name' is set
        if ('domain_name' not in params) or (params['domain_name'] is None):
            raise ValueError("Missing the required parameter `domain_name` when calling `get_domain_by_name`")

        collection_formats = {}

        # Build the request: the domain name is substituted into the URL path.
        resource_path = '/{domain_name}'.replace('{format}', 'json')
        path_params = {}
        if 'domain_name' in params:
            path_params['domain_name'] = params['domain_name']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Domain',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_domain_zone_file(self, domain_id, **kwargs):
"""
Find domain by name
Returns a domain's zone file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_domain_zone_file(domain_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int domain_id: ID of domain to delete (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_domain_zone_file_with_http_info(domain_id, **kwargs)
else:
(data) = self.get_domain_zone_file_with_http_info(domain_id, **kwargs)
return data
    def get_domain_zone_file_with_http_info(self, domain_id, **kwargs):
        """
        Export a domain's zone file
        Returns the zone file of the domain identified by `domain_id`.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_domain_zone_file_with_http_info(domain_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int domain_id: ID of domain whose zone file to return (required)
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the generic
        # request-control options understood by ApiClient.call_api().
        all_params = ['domain_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_domain_zone_file" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'domain_id' is set
        if ('domain_id' not in params) or (params['domain_id'] is None):
            raise ValueError("Missing the required parameter `domain_id` when calling `get_domain_zone_file`")

        collection_formats = {}

        # Build the request: the domain id is substituted into the URL path.
        resource_path = '/{domain_id}/export'.replace('{format}', 'json')
        path_params = {}
        if 'domain_id' in params:
            path_params['domain_id'] = params['domain_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='str',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_domains(self, **kwargs):
"""
Getting domains info
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_domains(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Domain]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_domains_with_http_info(**kwargs)
else:
(data) = self.get_domains_with_http_info(**kwargs)
return data
    def get_domains_with_http_info(self, **kwargs):
        """
        Getting domains info
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_domains_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: list[Domain]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This endpoint has no parameters of its own; only the generic
        # request-control options understood by ApiClient.call_api() are
        # accepted.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_domains" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        # Build the request: plain GET on the collection root.
        resource_path = '/'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting
        auth_settings = []

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[Domain]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def update_domain(self, domain_id, body, **kwargs):
"""
Updates a domain
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_domain(domain_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int domain_id: ID of domain to update (required)
:param UpdatedDomain body: Domain info for update (required)
:return: Domain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_domain_with_http_info(domain_id, body, **kwargs)
else:
(data) = self.update_domain_with_http_info(domain_id, body, **kwargs)
return data
    def update_domain_with_http_info(self, domain_id, body, **kwargs):
        """
        Updates a domain
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.update_domain_with_http_info(domain_id, body, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int domain_id: ID of domain to update (required)
        :param UpdatedDomain body: Domain info for update (required)
        :return: Domain
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments accepted in addition to the positional parameters.
        all_params = ['domain_id', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Swagger-codegen idiom: snapshot the local namespace so parameters can
        # be handled by name below. The local variable names above are
        # load-bearing -- renaming any of them would change this dict's keys.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_domain" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'domain_id' is set
        if ('domain_id' not in params) or (params['domain_id'] is None):
            raise ValueError("Missing the required parameter `domain_id` when calling `update_domain`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `update_domain`")
        collection_formats = {}
        # '{format}' does not occur in this path, so the replace() is generated
        # boilerplate and a no-op here.
        resource_path = '/{domain_id}'.replace('{format}', 'json')
        path_params = {}
        if 'domain_id' in params:
            path_params['domain_id'] = params['domain_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate the HTTP PATCH (and optional async dispatch) to the shared
        # ApiClient instance.
        return self.api_client.call_api(resource_path, 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Domain',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
| 39.23178 | 110 | 0.553187 |
c3460e311c5b6a1c760ee6c9e8309d6263165032 | 660 | py | Python | repository/versions/009_Add_study_attributes.py | LCBRU/no_api_etl | 80272743047f8a36ede4b092150e3b76c2fb0186 | [
"MIT"
] | null | null | null | repository/versions/009_Add_study_attributes.py | LCBRU/no_api_etl | 80272743047f8a36ede4b092150e3b76c2fb0186 | [
"MIT"
] | null | null | null | repository/versions/009_Add_study_attributes.py | LCBRU/no_api_etl | 80272743047f8a36ede4b092150e3b76c2fb0186 | [
"MIT"
] | null | null | null | from sqlalchemy import Table, MetaData, Column, NVARCHAR, Boolean
def upgrade(migrate_engine):
    """Add the two new study attribute columns to the edge_study table."""
    metadata = MetaData(bind=migrate_engine)
    edge_study = Table('edge_study', metadata, autoload=True)
    # sqlalchemy-migrate: Column.create() issues the ALTER TABLE immediately.
    Column('is_uhl_lead_centre', Boolean).create(edge_study)
    Column('primary_clinical_management_areas', NVARCHAR(200)).create(edge_study)
def downgrade(migrate_engine):
    """Drop the columns added by upgrade()."""
    metadata = MetaData(bind=migrate_engine)
    edge_study = Table('edge_study', metadata, autoload=True)
    for column_name in ('is_uhl_lead_centre', 'primary_clinical_management_areas'):
        edge_study.c[column_name].drop()
| 36.666667 | 98 | 0.775758 |
487f7f74001fbd3a128a361f1f41251e87608a1e | 3,112 | py | Python | src/attrbench/metrics/impact_score/impact_score.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | null | null | null | src/attrbench/metrics/impact_score/impact_score.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | 7 | 2020-03-02T13:03:50.000Z | 2022-03-12T00:16:20.000Z | src/attrbench/metrics/impact_score/impact_score.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | null | null | null | from typing import Callable, List
import torch
from torch.nn.functional import softmax, sigmoid
from torch.utils.data import DataLoader
from attrbench.lib.masking import Masker
from attrbench.metrics import Metric
from ._dataset import _ImpactScoreDataset
from .result import ImpactScoreResult
def impact_score(samples: torch.Tensor, labels: torch.Tensor, model: Callable, attrs: torch.Tensor, num_steps: int,
                 strict: bool, masker: Masker, tau: float = None, writer=None):
    """
    Count, per masking step, how many samples are "impacted" when their most
    attributed features are masked out.

    A sample counts as impacted when the masked prediction flips away from the
    original binary prediction (binary head) or no longer matches `labels`
    (multi-class head); in non-strict mode it additionally counts when the
    confidence in the reference class drops to at most `tau` times the
    original confidence.

    Args:
        samples: input batch the attributions were computed for.
        labels: target labels; used as the reference class in the
            multi-class branch.
        model: classifier returning logits of shape (batch, classes) or
            (batch, 1).
        attrs: attribution values for `samples`.
        num_steps: number of masking levels evaluated.
        strict: if True, only prediction flips count.
        masker: Masker producing the masked versions of `samples`.
        tau: confidence-drop ratio; required when strict is False.
        writer: optional summary writer that receives the masked samples
            as debug images.
    Returns:
        (counts, batch_size): `counts` is a 1-D tensor with one
        impacted-sample count per masking step; `batch_size` is len(samples).
    Raises:
        ValueError: when strict is False and no (truthy) tau is given.
    """
    if not (strict or tau):
        raise ValueError("Provide value for tau when calculating non-strict impact score")
    counts = []
    # One dataset item per masking level; batch_size=1 so each iteration
    # yields the whole masked batch for a single level.
    ds = _ImpactScoreDataset(num_steps, samples, attrs, masker)
    dl = DataLoader(ds, shuffle=False, batch_size=1, num_workers=0)
    with torch.no_grad():
        orig_out = model(samples)
    # A single output column signals a sigmoid/binary head.
    binary_output = orig_out.shape[1]==1
    batch_size = samples.size(0)
    if binary_output:
        orig_confidence = sigmoid(orig_out)
        preds = (orig_confidence>0.5)*1.
        orig_confidence = (preds-1 + orig_confidence)*torch.sign(preds-0.5) # if prediction ==0 then we want 1-output as the confidence
    else:
        # Softmax confidence of the labeled class, one value per sample.
        orig_confidence = softmax(orig_out, dim=1).gather(dim=1, index=labels.view(-1, 1))
    device = samples.device
    for i, masked_samples in enumerate(dl):
        # The DataLoader adds a length-1 batch dimension; unwrap it.
        masked_samples = masked_samples[0].to(device).float()
        with torch.no_grad():
            masked_out = model(masked_samples)
        if writer:
            writer.add_images("masked samples", masked_samples, global_step=i)
        if binary_output:
            masked_out = sigmoid(masked_out)
            # Confidence of the originally predicted class (1-p when pred was 0).
            confidence = (preds-1 + masked_out)*torch.sign(preds-0.5)
            flipped = (masked_out>0.5)*1. != preds
        else:
            confidence = softmax(masked_out, dim=1).gather(dim=1, index=labels.view(-1, 1))
            flipped = torch.argmax(masked_out, dim=1) != labels
        if not strict:
            # Also count samples whose confidence fell to <= tau * original.
            flipped = flipped | (confidence <= orig_confidence * tau).squeeze()
        counts.append(flipped.sum().item())
    # [len(mask_range)]
    result = torch.tensor(counts)
    return result, batch_size
class ImpactScore(Metric):
    """Metric that accumulates impact-score counts for each attribution method."""

    def __init__(self, model: Callable, method_names: List[str], num_steps: int, strict: bool,
                 masker: Masker, tau: float = None, writer_dir: str = None):
        super().__init__(model, method_names, writer_dir)
        # Masking schedule and flip criteria forwarded to impact_score().
        self.num_steps = num_steps
        self.strict = strict
        self.masker = masker
        self.tau = tau
        # NOTE(review): run_batch reads `self.result`; presumably the Metric
        # base class exposes `_result` through a `result` property -- confirm.
        self._result = ImpactScoreResult(method_names, strict, tau)

    def run_batch(self, samples, labels, attrs_dict: dict):
        """Score one batch for every attribution method in attrs_dict."""
        for method_name, method_attrs in attrs_dict.items():
            if method_name not in self.result.method_names:
                raise ValueError(f"Invalid method name: {method_name}")
            flipped, total = impact_score(samples, labels, self.model, method_attrs, self.num_steps,
                                          self.strict, self.masker, self.tau,
                                          writer=self._get_writer(method_name))
            self.result.append(method_name, (flipped, total))
| 43.830986 | 135 | 0.661311 |
f2bf28298f1e5a1117e87f0876f8ebc4456191d3 | 3,806 | py | Python | selfdrive/car/subaru/carcontroller.py | zairwolf/pilot | e4cb0d26e75db5f2ef39eebe496f3356f9d0245b | [
"MIT"
] | null | null | null | selfdrive/car/subaru/carcontroller.py | zairwolf/pilot | e4cb0d26e75db5f2ef39eebe496f3356f9d0245b | [
"MIT"
] | null | null | null | selfdrive/car/subaru/carcontroller.py | zairwolf/pilot | e4cb0d26e75db5f2ef39eebe496f3356f9d0245b | [
"MIT"
] | null | null | null | #from common.numpy_fast import clip
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.subaru import subarucan
from selfdrive.car.subaru.values import DBC
from opendbc.can.packer import CANPacker
from common.params import Params
params = Params()
from common.dp import get_last_modified
from common.dp import common_controller_update, common_controller_ctrl
class CarControllerParams():
    """Static steering-torque tuning constants for the Subaru controller."""
    def __init__(self):
        self.STEER_MAX = 2047  # max_steer 4095
        self.STEER_STEP = 2  # how often we update the steer cmd
        self.STEER_DELTA_UP = 50  # torque increase per refresh, 0.8s to max
        self.STEER_DELTA_DOWN = 70  # torque decrease per refresh
        self.STEER_DRIVER_ALLOWANCE = 60  # allowed driver torque before start limiting
        self.STEER_DRIVER_MULTIPLIER = 10  # weight driver torque heavily
        self.STEER_DRIVER_FACTOR = 1  # from dbc
class CarController():
    """Builds the per-frame CAN messages for Subaru steering and forwards the
    stock ES messages, applying the dragonpilot (dp) lateral overrides."""
    def __init__(self, dbc_name, CP, VM):
        # Steering / message bookkeeping state carried across frames.
        self.lkas_active = False
        self.apply_steer_last = 0
        self.es_distance_cnt = -1
        self.es_lkas_cnt = -1
        self.steer_rate_limited = False
        # Setup detection helper. Routes commands to
        # an appropriate CAN bus number.
        self.params = CarControllerParams()
        self.packer = CANPacker(DBC[CP.carFingerprint]['pt'])
        # dp: dragonpilot settings, refreshed periodically in update().
        self.dragon_enable_steering_on_signal = False
        self.dragon_lat_ctrl = True
        self.dp_last_modified = None
        self.last_blinker_on = False
        self.blinker_end_frame = 0
        self.dragon_blinker_off_timer = 0.
    def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert, left_line, right_line):
        """ Controls thread: build and return the CAN messages for this frame
        (steering command plus patched ES distance/LKAS messages). """
        # dp: poll the dragonpilot settings store every 500 frames and re-read
        # the toggles only when it actually changed.
        if frame % 500 == 0:
            modified = get_last_modified()
            if self.dp_last_modified != modified:
                self.dragon_lat_ctrl, \
                self.dragon_enable_steering_on_signal, \
                self.dragon_blinker_off_timer = common_controller_update()
                self.dp_last_modified = modified
        P = self.params
        # Send CAN commands.
        can_sends = []
        ### STEER ###
        # The steering command is only refreshed every STEER_STEP frames.
        if (frame % P.STEER_STEP) == 0:
            final_steer = actuators.steer if enabled else 0.
            apply_steer = int(round(final_steer * P.STEER_MAX))
            # limits due to driver torque
            new_steer = int(round(apply_steer))
            apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
            self.steer_rate_limited = new_steer != apply_steer
            lkas_enabled = enabled
            if not lkas_enabled:
                apply_steer = 0
            # dp: optionally suppress steering while a blinker is on, and for
            # dragon_blinker_off_timer frames after it turns off.
            blinker_on = CS.out.leftBlinker or CS.out.rightBlinker
            if not enabled:
                self.blinker_end_frame = 0
            if self.last_blinker_on and not blinker_on:
                self.blinker_end_frame = frame + self.dragon_blinker_off_timer
            apply_steer = common_controller_ctrl(enabled,
                                                 self.dragon_lat_ctrl,
                                                 self.dragon_enable_steering_on_signal,
                                                 blinker_on or frame < self.blinker_end_frame,
                                                 apply_steer)
            self.last_blinker_on = blinker_on
            can_sends.append(subarucan.create_steering_control(self.packer, apply_steer, frame, P.STEER_STEP))
            self.apply_steer_last = apply_steer
        # Re-emit the stock ES messages once per new counter value, patching in
        # our cancel / alert / lane-line state.
        if self.es_distance_cnt != CS.es_distance_msg["Counter"]:
            can_sends.append(subarucan.create_es_distance(self.packer, CS.es_distance_msg, pcm_cancel_cmd))
            self.es_distance_cnt = CS.es_distance_msg["Counter"]
        if self.es_lkas_cnt != CS.es_lkas_msg["Counter"]:
            can_sends.append(subarucan.create_es_lkas(self.packer, CS.es_lkas_msg, visual_alert, left_line, right_line))
            self.es_lkas_cnt = CS.es_lkas_msg["Counter"]
        return can_sends
| 35.90566 | 114 | 0.685497 |
7cceb4a46af093dd1c90212c591a98c12d7157d5 | 982 | py | Python | chatroom/migrations/0001_initial.py | MitchellSturba/COMP-4800-FinalProject-Chatroom | 5e795eb4ddd36bb410fd4faf863f1007a9d38f46 | [
"MIT"
] | 3 | 2020-11-29T18:25:22.000Z | 2021-12-24T14:23:57.000Z | chatroom/migrations/0001_initial.py | MitchellSturba/COMP-4800-FinalProject-Chatroom | 5e795eb4ddd36bb410fd4faf863f1007a9d38f46 | [
"MIT"
] | null | null | null | chatroom/migrations/0001_initial.py | MitchellSturba/COMP-4800-FinalProject-Chatroom | 5e795eb4ddd36bb410fd4faf863f1007a9d38f46 | [
"MIT"
] | 3 | 2021-05-29T19:28:27.000Z | 2022-02-26T06:56:02.000Z | # Generated by Django 3.1.3 on 2020-11-04 08:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the chatroom app: Room and Message."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Room: auto primary key plus a display name.
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('room_name', models.CharField(max_length=50)),
            ],
        ),
        # Message: text and sender, cascade-deleted with its Room.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.CharField(max_length=100)),
                ('sender', models.CharField(max_length=50)),
                ('room_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatroom.room')),
            ],
        ),
    ]
| 30.6875 | 114 | 0.578411 |
0345bfb3ad10c9ef5ad7569ec5a86a73374123db | 100 | py | Python | easy_uploader/apps.py | naritotakizawa/django-easy-uploader | 745c849588cf99618d19f39194396cfd934c1ff5 | [
"MIT"
] | 6 | 2017-06-24T08:35:16.000Z | 2021-02-27T01:52:44.000Z | easy_uploader/apps.py | naritotakizawa/django-easy-uploader | 745c849588cf99618d19f39194396cfd934c1ff5 | [
"MIT"
] | 10 | 2017-06-15T21:07:17.000Z | 2018-02-27T11:48:58.000Z | easy_uploader/apps.py | naritotakizawa/django-easy-uploader | 745c849588cf99618d19f39194396cfd934c1ff5 | [
"MIT"
] | 1 | 2018-02-21T01:30:50.000Z | 2018-02-21T01:30:50.000Z | from django.apps import AppConfig
class EasyUploaderConfig(AppConfig):
    """Django application configuration for the easy_uploader app."""
    # Dotted app path used by Django's app registry; must match the package.
    name = 'easy_uploader'
| 16.666667 | 36 | 0.78 |
f3b856d9cce004f5a4aaf0f7f8f174a52ac88dfe | 4,158 | py | Python | kubernetes/client/models/v1_ip_block.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/client/models/v1_ip_block.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_ip_block.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1IPBlock(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type.
    swagger_types = {
        'cidr': 'str',
        '_except': 'list[str]'
    }
    # Python attribute name -> JSON key ('except' is a Python keyword, hence
    # the leading-underscore attribute name).
    attribute_map = {
        'cidr': 'cidr',
        '_except': 'except'
    }
    def __init__(self, cidr=None, _except=None):
        """
        V1IPBlock - a model defined in Swagger

        Note: `cidr` is effectively required -- the property setter below
        raises ValueError for None, so V1IPBlock() with the default raises.
        """
        self._cidr = None
        # Name-mangled to _V1IPBlock__except; exposed via the _except property.
        self.__except = None
        self.discriminator = None
        self.cidr = cidr
        if _except is not None:
            self._except = _except
    @property
    def cidr(self):
        """
        Gets the cidr of this V1IPBlock.
        CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"

        :return: The cidr of this V1IPBlock.
        :rtype: str
        """
        return self._cidr
    @cidr.setter
    def cidr(self, cidr):
        """
        Sets the cidr of this V1IPBlock.
        CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"

        :param cidr: The cidr of this V1IPBlock.
        :type: str
        """
        if cidr is None:
            raise ValueError("Invalid value for `cidr`, must not be `None`")
        self._cidr = cidr
    @property
    def _except(self):
        """
        Gets the _except of this V1IPBlock.
        Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range

        :return: The _except of this V1IPBlock.
        :rtype: list[str]
        """
        return self.__except
    @_except.setter
    def _except(self, _except):
        """
        Sets the _except of this V1IPBlock.
        Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range

        :param _except: The _except of this V1IPBlock.
        :type: list[str]
        """
        self.__except = _except
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1IPBlock):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.653846 | 185 | 0.55267 |
6b1bae51421a8700570ba3a732af3198d0e2f58b | 5,050 | py | Python | share/lib/python/neuron/rxdtests/tests/hh_param.py | tommorse/nrn | 73236b12977118ae0a98d7dbbed60973994cdaee | [
"BSD-3-Clause"
] | 1 | 2020-05-28T17:21:52.000Z | 2020-05-28T17:21:52.000Z | share/lib/python/neuron/rxdtests/tests/hh_param.py | tommorse/nrn | 73236b12977118ae0a98d7dbbed60973994cdaee | [
"BSD-3-Clause"
] | 2 | 2019-11-09T23:02:28.000Z | 2019-11-18T00:17:10.000Z | share/lib/python/neuron/rxdtests/tests/hh_param.py | tommorse/nrn | 73236b12977118ae0a98d7dbbed60973994cdaee | [
"BSD-3-Clause"
] | 1 | 2018-12-18T13:52:16.000Z | 2018-12-18T13:52:16.000Z | from neuron import h, crxd as rxd
from neuron.crxd import v
from neuron.crxd.rxdmath import vtrap, exp, log
from math import pi
from matplotlib import pyplot
h.load_file('stdrun.hoc')
# parameters (classic HH kinetics at 6.3 degC; conductances rescaled from
# current density to molecules/um2/ms/mV)
h.celsius = 6.3
e = 1.60217662e-19  # elementary charge (C)
scale = 1e-14/e  # conversion factor into molecule-flux units
gnabar = 0.12*scale # molecules/um2 ms mV
gkbar = 0.036*scale
gl = 0.0003*scale
el = -54.3
q10 = 3.0**((h.celsius - 6.3)/10.0)  # temperature factor (1.0 at 6.3 degC)
# sodium activation 'm': voltage-dependent rates, time constant, steady state
alpha = 0.1 * vtrap(-(v + 40.0), 10)
beta = 4.0 * exp(-(v + 65)/18.0)
mtau = 1.0/(q10 * (alpha + beta))
minf = alpha/(alpha + beta)
# sodium inactivation 'h'
alpha = 0.07 * exp(-(v + 65.0)/20.0)
beta = 1.0/(exp(-(v + 35.0)/10.0) + 1.0)
htau = 1.0/(q10 * (alpha + beta))
hinf = alpha/(alpha + beta)
# potassium activation 'n'
alpha = 0.01 * vtrap(-(v + 55.0), 10.0)
beta = 0.125 * exp(-(v + 65.0)/80.0)
ntau = 1.0/(q10 * (alpha + beta))
ninf = alpha/(alpha + beta)
# Two single-segment sections: somaA gets the rxd HH model below, somaB the
# built-in mod-file 'hh' mechanism, so the two implementations can be compared.
somaA = h.Section('somaA')
somaA.pt3dclear()
somaA.pt3dadd(-90,0,0,30)
somaA.pt3dadd(-60,0,0,30)
somaA.nseg = 1
somaB = h.Section('somaB')
somaB.pt3dclear()
somaB.pt3dadd(60,0,0,30)
somaB.pt3dadd(90,0,0,30)
somaB.nseg = 1
# mod version
somaB.insert('hh')
# rxd version
# Where? -- the regions the rxd species live on
# intracellular
cyt = rxd.Region(h.allsec(), name='cyt', nrn_region='i')
# membrane
mem = rxd.Region(h.allsec(), name='cell_mem', geometry = rxd.membrane())
# extracellular
ecs = rxd.Extracellular(-100, -100, -100, 100, 100, 100, dx=33)
# Who?
def init(ics, ecs):
    """Return an initializer that yields `ecs` for extracellular nodes and
    `ics` for all other nodes.

    NOTE(review): the `ecs` parameter shadows the module-level region of the
    same name, and this helper is never called in the visible script.
    """
    def pick(nd):
        if isinstance(nd, rxd.node.NodeExtracellular):
            return ecs
        return ics
    return pick
# ions: intracellular/membrane species with extracellular parameters for the
# bath concentrations; x is a stand-in species carrying the leak current
k = rxd.Species([cyt, mem], name='k', d=1, charge=1, initial=54.4, represents='CHEBI:29103')
kecs = rxd.Parameter([ecs], name='k', value=2.5, charge=1, represents='CHEBI:29103')
na = rxd.Species([cyt, mem], name='na', d=1, charge=1, initial=10.0, represents='CHEBI:29101')
naecs = rxd.Parameter([ecs], name='na', value=140, charge=1, represents='CHEBI:29101')
x = rxd.Species([cyt, mem, ecs], name='x', charge=1, initial=1e9)
# Convenience handles: intracellular (i) and extracellular (o) views.
ki, ko, nai, nao, xi, xo = k[cyt], kecs[ecs], na[cyt], naecs[ecs], x[cyt], x[ecs]
# gates as rxd states, started at their resting-state values
ngate = rxd.State([cyt, mem], name='ngate', initial=0.24458654944007166)
mgate = rxd.State([cyt, mem], name='mgate', initial=0.028905534475191907)
hgate = rxd.State([cyt, mem], name='hgate', initial=0.7540796658225246)
# somaA parameter: 1 on somaA, 0 elsewhere, so the rxd currents only act there
pA = rxd.Parameter([cyt, mem], name='paramA',
                   initial=lambda nd: 1 if nd.segment in somaA else 0)
# What: gate kinetics and membrane currents
# gates relax toward their steady states with the HH time constants
m_gate = rxd.Rate(mgate, (minf - mgate)/mtau)
h_gate = rxd.Rate(hgate, (hinf - hgate)/htau)
n_gate = rxd.Rate(ngate, (ninf - ngate)/ntau)
# Nernst potentials
ena = 1e3*h.R*(h.celsius + 273.15)*log(nao/nai)/h.FARADAY
ek = 1e3*h.R*(h.celsius + 273.15)*log(ko/ki)/h.FARADAY
# HH conductances (gated, restricted to somaA by pA)
gna = pA*gnabar*mgate**3*hgate
gk = pA*gkbar*ngate**4
# Transmembrane currents as multi-compartment reactions with membrane flux.
na_current = rxd.MultiCompartmentReaction(nai, nao, gna*(v - ena),
                                          mass_action=False, membrane=mem,
                                          membrane_flux=True)
k_current = rxd.MultiCompartmentReaction(ki, ko, gk*(v - ek),
                                         mass_action=False, membrane=mem,
                                         membrane_flux=True)
leak_current = rxd.MultiCompartmentReaction(xi, xo, pA*gl*(v - el),
                                            mass_action=False, membrane=mem,
                                            membrane_flux=True)
# stimulate both somata identically with a current clamp
stimA = h.IClamp(somaA(0.5))
stimA.delay = 50
stimA.amp = 1
stimA.dur = 50
stimB = h.IClamp(somaB(0.5))
stimB.delay = 50
stimB.amp = 1
stimB.dur = 50
# record time, voltages, gate states and currents for both implementations
tvec = h.Vector().record(h._ref_t)
vvecA = h.Vector().record(somaA(0.5)._ref_v)
mvecA = h.Vector().record(mgate.nodes(somaA(0.5))[0]._ref_value)
nvecA = h.Vector().record(ngate.nodes(somaA(0.5))[0]._ref_value)
hvecA = h.Vector().record(hgate.nodes(somaA(0.5))[0]._ref_value)
kvecA = h.Vector().record(somaA(0.5)._ref_ik)
navecA = h.Vector().record(somaA(0.5)._ref_ina)
vvecB = h.Vector().record(somaB(0.5)._ref_v)
kvecB = h.Vector().record(somaB(0.5)._ref_ik)
navecB = h.Vector().record(somaB(0.5)._ref_ina)
mvecB = h.Vector().record(somaB(0.5).hh._ref_m)
nvecB = h.Vector().record(somaB(0.5).hh._ref_n)
hvecB = h.Vector().record(somaB(0.5).hh._ref_h)
# NOTE(review): tvec is re-created here, dropping the recording set up above.
tvec = h.Vector().record(h._ref_t)
# run for 100 ms
h.dt=0.025
h.finitialize(-70)
#for i in range(1000):
#    h.fadvance()
#    print(h.t,somaA(0.5).v, somaA(0.5).ki, somaA(0.5).nai, somaA(0.5).xi)
h.continuerun(100)
# plot the results: membrane potential (rxd vs mod)
pyplot.ion()
fig = pyplot.figure()
pyplot.plot(tvec, vvecA, label="rxd")
pyplot.plot(tvec, vvecB, label="mod")
pyplot.legend()
fig.set_dpi(200)
# gate variables (solid: rxd / somaA, dotted: mod / somaB)
fig = pyplot.figure()
pyplot.plot(tvec, hvecA, '-b', label='h')
pyplot.plot(tvec, mvecA, '-r', label='m')
pyplot.plot(tvec, nvecA, '-g', label='n')
pyplot.plot(tvec, hvecB, ':b')
pyplot.plot(tvec, mvecB, ':r')
pyplot.plot(tvec, nvecB, ':g')
pyplot.legend()
fig.set_dpi(200)
# ionic currents
fig = pyplot.figure()
pyplot.plot(tvec, kvecA.as_numpy(), '-b', label='k')
pyplot.plot(tvec, navecA.as_numpy(), '-r', label='na')
pyplot.plot(tvec, kvecB, ':b')
pyplot.plot(tvec, navecB, ':r')
pyplot.legend()
fig.set_dpi(200)
| 28.055556 | 94 | 0.634851 |
9e1be7c8be64305b88afe9e6f5525bcc75ff289a | 7,424 | py | Python | prepare_dataset/utils.py | dongkwan-kim/SubGNN | 8e3ad75d4a5d0eedb6fd35a94e7810d388444027 | [
"MIT"
] | null | null | null | prepare_dataset/utils.py | dongkwan-kim/SubGNN | 8e3ad75d4a5d0eedb6fd35a94e7810d388444027 | [
"MIT"
] | null | null | null | prepare_dataset/utils.py | dongkwan-kim/SubGNN | 8e3ad75d4a5d0eedb6fd35a94e7810d388444027 | [
"MIT"
] | null | null | null | # General
import random
import numpy as np
# Pytorch
import torch
import torch.nn.functional as F
from torch.nn import Sigmoid
from torch_geometric.data import Dataset
# Matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Sci-kit Learn
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score, roc_curve, \
precision_recall_curve
# Global variables
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calc_loss_both(data, dot_pred):
    """
    Calculate loss via link prediction
    Args
    - data (Data object): graph; only data.y (node labels) is read here
    - dot_pred (tensor long, shape=(nodes, classes)): predictions calculated from dot product
    Return
    - loss (float): NLL loss of log-softmaxed predictions vs data.y
    """
    # Uses the module-level `device` global to move the predictions.
    loss = F.nll_loss(F.log_softmax(dot_pred.to(device), dim=-1), data.y.long())
    # NOTE(review): requires_grad is forced on after the fact; this only works
    # when `loss` is a leaf (i.e. dot_pred carried no autograd history), and
    # even then no gradients flow back into the model -- confirm intent.
    loss.requires_grad = True
    return loss
def el_dot(embed, edges, test=False):
"""
Calculate element-wise dot product for link prediction
Args
- embed (tensor): embedding
- edges (tensor): list of edges
Return
- tensor of element-wise dot product
"""
embed = embed.cpu().detach()
edges = edges.cpu().detach()
source = torch.index_select(embed, 0, edges[0, :])
target = torch.index_select(embed, 0, edges[1, :])
dots = torch.bmm(source.view(edges.shape[1], 1, embed.shape[1]), target.view(edges.shape[1], embed.shape[1], 1))
dots = torch.sigmoid(np.squeeze(dots))
if test: return dots
diff = np.squeeze(torch.ones((1, len(dots))) - dots)
return torch.stack((diff, dots), 1)
def calc_roc_score(pred_all, pos_edges=None, neg_edges=None, true_all=None, save_plots="", loss=None,
                   multi_class=False, labels=None, multilabel=False):
    """
    Calculate ROC score

    Computes ROC-AUC and companion metrics, for either link prediction
    (default) or (multi-label) multi-class classification.

    Args
    - pred_all: per-edge scores (binary mode) or a (samples, classes) score
      matrix (multi-class mode)
    - pos_edges / neg_edges: index tensors selecting the positive / negative
      edge predictions out of pred_all (binary mode only)
    - true_all: ground-truth matrix (multi-class mode only)
    - save_plots: PDF path for diagnostic plots; "" disables plotting
    - loss: dict of per-run loss curves forwarded to plot_roc_ap
    - multi_class: switch between the two modes
    - labels: class names for per-class reporting (multi-class mode)
    - multilabel: threshold each class at 0.5 instead of taking the argmax
    Return
    - binary mode: (roc_auc, ap_score, acc, f1)
    - multi-class: (roc_auc, acc, f1_micro), extended with the per-class
      (roc, ap, f1) from plot_roc_ap when save_plots is set
    """
    # Replace the former mutable default arguments ([]-defaults shared across
    # calls) with per-call values; callers passing nothing see no change.
    pos_edges = [] if pos_edges is None else pos_edges
    neg_edges = [] if neg_edges is None else neg_edges
    true_all = [] if true_all is None else true_all
    loss = [] if loss is None else loss
    labels = [] if labels is None else labels
    if multi_class:
        if save_plots != "":
            class_roc, class_ap, class_f1 = plot_roc_ap(true_all, pred_all, save_plots, loss=loss, labels=labels,
                                                        multilabel=multilabel)
        roc_auc = roc_auc_score(true_all, pred_all, multi_class='ovr')
        if multilabel:
            # Multi-label: independent 0.5 threshold per class.
            pred_all = (pred_all > 0.5)
        else:
            # Single-label: reduce one-hot truths and score matrix to class ids.
            true_all = torch.argmax(true_all, axis=1)
            pred_all = torch.argmax(torch.tensor(pred_all), axis=1)
        f1_micro = f1_score(true_all, pred_all, average="micro")
        acc = accuracy_score(true_all, pred_all)
        if save_plots != "": return roc_auc, acc, f1_micro, class_roc, class_ap, class_f1
        return roc_auc, acc, f1_micro
    else:
        # Link prediction: positives are labeled 1, negatives 0.
        pred_pos = pred_all[pos_edges]
        pred_neg = pred_all[neg_edges]
        pred_all = torch.cat((pred_pos, pred_neg), 0).cpu().detach().numpy()
        true_all = torch.cat((torch.ones(len(pred_pos)), torch.zeros(len(pred_neg))), 0).cpu().detach().numpy()
        roc_auc = roc_auc_score(true_all, pred_all)
        ap_score = average_precision_score(true_all, pred_all)
        acc = accuracy_score(true_all, (pred_all > 0.5))
        f1 = f1_score(true_all, (pred_all > 0.5))
        if save_plots != "": plot_roc_ap(true_all, pred_all, save_plots, loss, multilabel=multilabel)
        return roc_auc, ap_score, acc, f1
def plot_roc_ap(y_true, y_pred, save_plots, loss={}, labels=[], multilabel=False):
    """
    Write ROC, precision-recall, optional loss, and F1 diagnostics to a PDF.

    Args
    - y_true: ground truth; a (samples, classes) matrix when labels are given,
      else a 1-D binary vector
    - y_pred: predicted scores with the same shape convention
    - save_plots: output PDF path
    - loss (dict): name -> sequence of loss tensors, plotted when non-empty
    - labels: class names; non-empty switches to per-class (multiclass) mode
    - multilabel: threshold at 0.5 instead of argmax for the F1 section
    Return
    - (roc, ap, f1): per-class dicts/arrays in multiclass mode, scalars and an
      empty f1 list in binary mode
    NOTE(review): the mutable defaults (loss={}, labels=[]) are only read
    here, so they are harmless, but None sentinels would be safer.
    """
    with PdfPages(save_plots) as pdf:
        # ROC page
        fpr = dict()
        tpr = dict()
        roc = dict()
        if len(labels) > 0: # Multiclass classification: one curve per class
            for c in range(y_true.shape[1]):
                fpr[c], tpr[c], _ = roc_curve(y_true[:, c], y_pred[:, c])
                roc[c] = roc_auc_score(y_true[:, c], y_pred[:, c])
                plt.plot(fpr[c], tpr[c], label=str(labels[c]) + " (area = {:.5f})".format(roc[c]))
                print("[ROC] " + str(labels[c]) + ": {:.5f}".format(roc[c]))
        else: # Binary classification
            fpr, tpr, _ = roc_curve(y_true, y_pred)
            roc = roc_auc_score(y_true, y_pred)
            plt.plot(fpr, tpr, label="ROC = {:.5f}".format(roc))
        # Chance diagonal for reference.
        plt.plot([0, 1], [0, 1], linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.legend(loc="best")
        plt.title("ROC")
        pdf.savefig()
        plt.close()
        # Precision-Recall page
        precision = dict()
        recall = dict()
        ap = dict()
        if len(labels) > 0: # Multiclass classification
            for c in range(y_true.shape[1]):
                precision[c], recall[c], _ = precision_recall_curve(y_true[:, c], y_pred[:, c])
                ap[c] = average_precision_score(y_true[:, c], y_pred[:, c])
                plt.plot(recall[c], precision[c], label=str(labels[c]) + " (area = {:.5f})".format(ap[c]))
                print("[AP] " + str(labels[c]) + ": {:.5f}".format(ap[c]))
        else: # Binary classification
            precision, recall, _ = precision_recall_curve(y_true, y_pred)
            ap = average_precision_score(y_true, y_pred)
            # Baseline: the positive-class prevalence.
            n_true = sum(y_true) / len(y_true)
            plt.plot(recall, precision, label="AP = {:.5f}".format(ap))
            plt.plot([0, 1], [n_true, n_true], linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("Recall")
        plt.ylabel("Precision")
        if len(labels) > 0: plt.legend(loc="best")
        plt.title("Precision-recall curve")
        pdf.savefig()
        plt.close()
        # Loss page (only when loss curves were supplied)
        if len(loss) > 0:
            max_epochs = max([len(l) for k, l in loss.items()])
            for k, losses in loss.items():
                # Loss values are tensors; move them to CPU before plotting.
                losses_cpu = [_l.detach().cpu() for _l in losses]
                plt.plot(np.arange(max_epochs), losses_cpu, label=k)
            plt.xlabel("Epochs")
            plt.ylabel("Loss")
            plt.xlim([0, max_epochs])
            plt.legend(loc="best")
            plt.title("Training Loss")
            pdf.savefig()
            plt.close()
        # F1 score (printed, not plotted; multiclass mode only)
        f1 = []
        if len(labels) > 0: # Multiclass classification
            if not multilabel:
                y_true = torch.argmax(y_true, axis=1)
                y_pred = torch.argmax(torch.tensor(y_pred), axis=1)
            else:
                y_pred = (y_pred > 0.5)
            f1 = f1_score(y_true, y_pred, range(len(labels)), average=None)
            for c in range(len(f1)):
                print("[F1] " + str(labels[c]) + ": {:.5f}".format(f1[c]))
    return roc, ap, f1
######################################################
# Get best embeddings
@torch.no_grad()
def get_embeddings(model, data, device):
    """
    Compute embeddings for all nodes with the (best) trained model.
    Args
    - model (torch object): trained model, called as model(x, edge_index)
    - data (Data object): graph holding node features and edge index
    - device (torch object): cpu or cuda; the data is moved there
    Return
    - tensor with the embedding of every node
    """
    model.eval()  # inference mode: freeze dropout / batch-norm behavior
    graph = data.to(device)
    embeddings = model(graph.x, graph.edge_index)
    print(embeddings.shape)
    return embeddings
| 33.59276 | 116 | 0.565867 |
8b05f4ddbeaa7db6bc6f4e796172f5a91bda9ec1 | 2,434 | py | Python | BackTesting/BAIS.py | Jacarlianda/FinMind | 181e478727c7cda498da2b42495e2a6fea9688e3 | [
"Apache-2.0"
] | null | null | null | BackTesting/BAIS.py | Jacarlianda/FinMind | 181e478727c7cda498da2b42495e2a6fea9688e3 | [
"Apache-2.0"
] | null | null | null | BackTesting/BAIS.py | Jacarlianda/FinMind | 181e478727c7cda498da2b42495e2a6fea9688e3 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import requests
# Note: the class name must match the file name, e.g. class BAIS lives in BAIS.py
class BAIS:
    """24-day bias (乖離率) mean-reversion strategy: buy when the close is far
    below its moving average, sell when it is far above."""

    def __init__(self, stock_price, **kwargs):
        # -------------------------------------------------------------------
        # Framework contract -- keep unchanged: sort the price history by date
        # and stash the optional auxiliary datasets the backtester may pass.
        stock_price = stock_price.sort_values('date')
        self.stock_price = stock_price
        self.MarginPurchaseShortSale = kwargs.get("MarginPurchaseShortSale", pd.DataFrame())
        self.InstitutionalInvestorsBuySell = kwargs.get("InstitutionalInvestorsBuySell", pd.DataFrame())
        self.Shareholding = kwargs.get("Shareholding", pd.DataFrame())
        # -------------------------------------------------------------------
        # Strategy parameters: moving-average window and bias thresholds (%).
        self.ma_days = 24
        self.bais_lower = -7
        self.bais_upper = 8
        self.create_feature()

    def create_feature(self):
        """Add the moving-average and bias (%) columns to stock_price and drop
        the warm-up rows that have no moving average yet."""
        ma_col = 'ma{}'.format(self.ma_days)
        close = self.stock_price['close']
        self.stock_price[ma_col] = close.rolling(window=self.ma_days).mean()
        self.stock_price['bais'] = (close - self.stock_price[ma_col]) / self.stock_price[ma_col] * 100
        self.stock_price = self.stock_price.dropna().reset_index(drop=True)

    def trade(self, date):
        """Decide the action for `date` from the previous day's indicator:
        1 = buy, -1 = sell, 0 = do nothing (also for unknown dates)."""
        row = self.stock_price[self.stock_price['date'] == date]
        if row.empty:
            return 0
        bias = row['bais'].values[0]
        if bias < self.bais_lower:
            return 1
        if bias > self.bais_upper:
            return -1
        return 0
def test():
    """Manual smoke test: fetch 2317 (Hon Hai) daily prices from the FinMind
    API and run the strategy on a few dates. Requires network access; the
    trade() return values are not asserted."""
    form_data = {'dataset': 'TaiwanStockPrice',
                 'stock_id': '2317',
                 'date': '2019-01-01'}
    url = 'http://finmindapi.servebeer.com/api/data'
    # verify=True is the requests default; the endpoint is plain HTTP, so TLS
    # verification does not actually apply here.
    res = requests.post(
        url, verify=True,
        data=form_data)
    temp = res.json()
    stock_price = pd.DataFrame(temp['data'])
    date = '2019-05-03'
    # `self` is just an ordinary local variable name here, not a receiver.
    self = BAIS(stock_price)
    self.trade(date)
    self.trade('2019-05-07')
    self.trade('2019-05-11')
| 31.205128 | 117 | 0.534511 |
1a66f31cf54afbc4f4633af2105c8ceb245fb5c4 | 3,737 | py | Python | yolo/yolo.py | phil-hoang/general-object-detector | a59fcfd4cf237dda7bde370b947d0d3096631d56 | [
"MIT"
] | null | null | null | yolo/yolo.py | phil-hoang/general-object-detector | a59fcfd4cf237dda7bde370b947d0d3096631d56 | [
"MIT"
] | null | null | null | yolo/yolo.py | phil-hoang/general-object-detector | a59fcfd4cf237dda7bde370b947d0d3096631d56 | [
"MIT"
] | null | null | null | """
YOLOv5 model helpers (loads ultralytics YOLOv5; module originally named for YOLOv3)
"""
import numpy as np
import torch
import torch.nn as nn
from torch import jit
import torchvision.transforms as T
from torchvision.ops import nms
def coco80_to_coco91_class(label):
# converts 80-index (val2014) to 91-index (paper)
coco91_classes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
x = [coco91_classes[i] for i in label]
x = torch.tensor(x, dtype=torch.long)
return x
def yolo_to_coco_traffic(label):
    """
    Converts 0-index yolo data to custom COCO traffic data.

    The custom dataset keeps the COCO ids for its overlapping classes and
    adds the extensions 92, 93 and 94.

    Args:
        label: Iterable of 0-based class indices (0-13) from the custom model.

    Returns:
        torch.LongTensor of the corresponding COCO(-extended) label ids.
    """
    # Single precomputed lookup replaces the old two-step mapping
    # (index -> 1-based traffic id via np.arange -> COCO id via dict),
    # which did two indirections per label for the same result.
    yolo_to_coco = (1, 2, 3, 4, 6, 7, 8, 11, 13, 17, 18, 92, 93, 94)
    return torch.tensor([yolo_to_coco[i] for i in label], dtype=torch.long)
def yolo_model():
    """Download and return the pretrained YOLOv5s model from ultralytics.

    Returns:
        The torch.hub YOLOv5s model, switched to eval mode.
    """
    # force_reload refreshes the cached torch.hub repo on every call.
    detector = torch.hub.load('ultralytics/yolov5', 'yolov5s',
                              pretrained=True, force_reload=True)
    detector.eval()
    return detector
def yolo_model_traffic():
    """Load the custom traffic YOLOv5 model.

    The weight file 'yolov5sTraffic.pt' has to be placed into /yolo.

    Returns:
        The custom torch.hub YOLOv5 model, switched to eval mode.
    """
    weight_path = 'yolo/yolov5sTraffic.pt'
    detector = torch.hub.load('ultralytics/yolov5', 'custom', weight_path,
                              force_reload=True)
    detector.eval()
    return detector
def yolo_predict(model, frame, thresh=0.6):
    """
    Run the YOLO model on a frame and keep detections above a confidence
    threshold.

    Args:
        model: torch.hub YOLOv5 model (see ``yolo_model``).
        frame: OpenCV image in BGR.
        thresh: Minimum class confidence for a detection to be kept.

    Returns:
        boxes -- Torch tensor of top-left/bottom-right corners [(x1, y1, x2, y2)]
        labels -- Torch tensor of 91-class COCO label ids per box
        conf -- Torch tensor of class confidence scores per box
    """
    # xyxy[0]: detections of the first (only) image as an N x 6 tensor.
    detections = model(frame).xyxy[0]
    # Columns: x1, y1, x2, y2, confidence, class index.
    boxes, conf = detections[:, :4], detections[:, 4]
    labels = detections[:, 5].type(torch.LongTensor)
    # Drop low-confidence detections.
    keep = conf > thresh
    boxes, conf, labels = boxes[keep], conf[keep], labels[keep]
    # Remap 80-class indices to 91-class COCO ids (some classes removed).
    labels = coco80_to_coco91_class(labels)
    return boxes, labels, conf
def yolo_traffic_predict(model, frame, thresh=0.6):
    """
    Run the custom traffic YOLO model (traffic-light status and more) on a
    frame and keep detections above a confidence threshold.

    Args:
        model: Custom torch.hub YOLOv5 model (see ``yolo_model_traffic``).
        frame: OpenCV image in BGR.
        thresh: Minimum class confidence for a detection to be kept.

    Returns:
        boxes -- Torch tensor of top-left/bottom-right corners [(x1, y1, x2, y2)]
        labels -- Torch tensor of COCO-extended label ids per box
        conf -- Torch tensor of class confidence scores per box
    """
    # xyxy[0]: detections of the first (only) image as an N x 6 tensor.
    detections = model(frame).xyxy[0]
    # Columns: x1, y1, x2, y2, confidence, class index (0-indexed yolo).
    boxes, conf = detections[:, :4], detections[:, 4]
    labels = detections[:, 5].type(torch.LongTensor)
    # Drop low-confidence detections.
    keep = conf > thresh
    boxes, conf, labels = boxes[keep], conf[keep], labels[keep]
    # Map 0-indexed yolo labels to the COCO-extended traffic ids.
    labels = yolo_to_coco_traffic(labels)
    return boxes, labels, conf
| 28.968992 | 138 | 0.632861 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.